// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <net_processing.h>

#include <addrman.h>
#include <avalanche/processor.h>
#include <avalanche/proof.h>
#include <banman.h>
#include <blockencodings.h>
#include <blockfilter.h>
#include <blockvalidity.h>
#include <chain.h>
#include <chainparams.h>
#include <config.h>
#include <consensus/amount.h>
#include <hash.h>
#include <headerssync.h>
#include <invrequest.h>
#include <kernel/chain.h>
#include <merkleblock.h>
#include <netbase.h>
#include <netmessagemaker.h>
#include <node/blockstorage.h>
#include <node/miner.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/settings.h>
#include <primitives/block.h>
#include <random.h>
#include <reverse_iterator.h>
#include <scheduler.h>
#include <streams.h>
#include <timedata.h>
#include <tinyformat.h>
#include <txmempool.h>
#include <txorphanage.h>
#include <util/check.h>
#include <util/strencodings.h>
#include <util/trace.h>
#include <validation.h>

#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index_container.hpp>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <functional>
#include <future>
#include <memory>
#include <numeric>
#include <typeinfo>
/** How long a transaction has to be in the mempool before it can
 *  unconditionally be relayed. */
static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
/** Headers download timeout. Timeout = base + per_header * (expected number
 *  of headers). */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
/** How long to wait for a peer to respond to a getheaders request. */
static constexpr auto HEADERS_RESPONSE_TIME{2min};
/** Timeout for (unprotected) outbound peers to sync to our chainwork. */
static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
/** How frequently to check for stale tips. */
static constexpr auto STALE_CHECK_INTERVAL{10min};
/** How frequently to check for extra outbound peers and disconnect. */
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
/** Minimum time an outbound-peer-eviction candidate must be connected for, in
 *  order to evict. */
static constexpr auto MINIMUM_CONNECT_TIME{30s};
/** SHA256("main address relay")[0:8] */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/** Age after which a stale block will no longer be served if requested, as
 *  protection against fingerprinting. Set to one month, denominated in
 *  seconds. */
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/** Age after which a block is considered historical for purposes of rate
 *  limiting block relay. Set to one week, denominated in seconds. */
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** Time between pings automatically sent out for latency probing and
 *  keepalive. */
static constexpr auto PING_INTERVAL{2min};
/** The maximum number of entries in a locator. */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an 'inv' protocol message. */
static const unsigned int MAX_INV_SZ = 50000;
static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
              "Max protocol message length must be greater than largest "
              "possible INV message");

/** Minimum time between two successive getavaaddr messages from the same
 *  peer. */
static constexpr auto GETAVAADDR_INTERVAL{2min};

/** If no proof was requested after a compact proofs message within this
 *  timeout, the shared proofs radix tree for the peer can be released. */
static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT{2min};

/** Maximum number of avalanche-stalled txids to track per peer. */
static constexpr size_t MAX_AVALANCHE_STALLED_TXIDS_PER_PEER{100};

/** Parameters shared between the transaction and proof request trackers. */
struct DataRequestParameters {
    /** Maximum number of in-flight data requests from a peer. */
    const size_t max_peer_request_in_flight;

    /** Maximum number of announcements to consider for requesting, per
     *  peer. */
    const size_t max_peer_announcements;

    /** How long to delay requesting data from non-preferred peers. */
    const std::chrono::seconds nonpref_peer_delay;

    /** How long to delay requesting data from overloaded peers. */
    const std::chrono::seconds overloaded_peer_delay;

    /** How long to wait before requesting the same data from an additional
     *  peer. */
    const std::chrono::microseconds getdata_interval;

    /** Permission flags a peer must have to bypass the request limits
     *  tracking. */
    const NetPermissionFlags bypass_request_limits_permissions;
};

static constexpr DataRequestParameters TX_REQUEST_PARAMS{
    100,                       // max_peer_request_in_flight
    5000,                      // max_peer_announcements
    std::chrono::seconds(2),   // nonpref_peer_delay
    std::chrono::seconds(2),   // overloaded_peer_delay
    std::chrono::seconds(60),  // getdata_interval
    NetPermissionFlags::Relay, // bypass_request_limits_permissions
};

static constexpr DataRequestParameters PROOF_REQUEST_PARAMS{
    100,                      // max_peer_request_in_flight
    5000,                     // max_peer_announcements
    std::chrono::seconds(2),  // nonpref_peer_delay
    std::chrono::seconds(2),  // overloaded_peer_delay
    std::chrono::seconds(60), // getdata_interval
    BypassProofRequestLimits, // bypass_request_limits_permissions
};
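
// Note on the two parameter sets above (illustrative, not in the original
// source): a peer lacking the bypass permission may have at most 100 getdata
// requests in flight and at most 5000 announcements tracked at once;
// announcements past that cap are dropped by TooManyAnnouncements() further
// down in this file, and the same item is only requested from an additional
// peer after the 60s getdata_interval has elapsed.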

/** Limit to avoid sending big packets. Not used in processing incoming
 *  GETDATA for compatibility. */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single
 *  peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Default time during which a peer must stall block download progress before
 *  being disconnected. The actual timeout is increased temporarily if peers
 *  are disconnected for hitting it. */
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
/** Maximum timeout for stalling block download. */
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/** Maximum depth of blocks we're willing to serve as compact blocks to peers
 *  when requested. For older blocks, a regular BLOCK response will be sent. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests
 *  for. */
static const int MAX_BLOCKTXN_DEPTH = 10;
static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP,
              "MAX_BLOCKTXN_DEPTH too high");
/** Size of the "block download window": how far ahead of our current height
 *  do we fetch? Larger windows tolerate larger download speed differences
 *  between peers, but increase the potential degree of disordering of blocks
 *  on disk (which makes reindexing and pruning harder). */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout base, expressed in multiples of the block
 *  interval. */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
/** Additional block download timeout per parallel downloading peer, expressed
 *  in multiples of the block interval. */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of headers to announce when relaying blocks with a headers
 *  message. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED. */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts. */
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
/** Average delay between peer address broadcasts. */
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
/** Delay between rotating the peers we relay a particular address to. */
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
/** Average delay between trickled inventory transmissions for inbound
 *  peers. */
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
/** Maximum rate of inventory items to send per second. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Maximum number of inventory items to send per transmission, per MB of the
 *  block size limit. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
    INVENTORY_BROADCAST_PER_SECOND *
    count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
/** The number of most recently announced transactions a peer can request. */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
/** Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything
 *  typically relayed before unconditional relay from the mempool kicks in. */
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND *
                                                UNCONDITIONAL_RELAY_DELAY /
                                                std::chrono::seconds{1},
              "INVENTORY_RELAY_MAX too low");

/** Average delay between feefilter broadcasts. */
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
/** Maximum feefilter broadcast delay after a significant change. */
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
/** Maximum number of compact filters that may be requested with one
 *  getcfilters. See BIP 157. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders.
 *  See BIP 157. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** The maximum percentage of addresses from our addrman to return in response
 *  to a getaddr message. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** The maximum rate of address records we're willing to process on average. */
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
/** The compactblocks version we support. See BIP 152. */
static constexpr uint64_t CMPCTBLOCKS_VERSION{1};

// Internal stuff
namespace {
/** Blocks that are in flight, and that are in the queue to be downloaded. */
struct QueuedBlock {
    /** The block's index entry. We only request blocks whose header we have
     *  already validated. */
    const CBlockIndex *pindex;
    /** Optional, used for CMPCTBLOCK downloads. */
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};

/** A txid whose relay is stalled pending avalanche finalization, together
 *  with the time it was added. */
struct StalledTxId {
    TxId txid;
    std::chrono::seconds timeAdded;

    StalledTxId(TxId txid_, std::chrono::seconds timeAdded_)
        : txid(txid_), timeAdded(timeAdded_) {}
};

struct by_txid {};
struct by_time {};

using StalledTxIdSet = boost::multi_index_container<
    StalledTxId,
    boost::multi_index::indexed_by<
        // indexed by txid
        boost::multi_index::hashed_unique<
            boost::multi_index::tag<by_txid>,
            boost::multi_index::member<StalledTxId, TxId, &StalledTxId::txid>,
            SaltedTxIdHasher>,
        // sorted by timeAdded
        boost::multi_index::ordered_non_unique<
            boost::multi_index::tag<by_time>,
            boost::multi_index::member<StalledTxId, std::chrono::seconds,
                                       &StalledTxId::timeAdded>>>>;

/**
 * Data structure for an individual peer. This struct is not protected by
 * cs_main since it does not contain validation-critical data.
 */
struct Peer {
    /** Same id as the CNode object for this peer. */
    const NodeId m_id{0};

    /** Services we offered to this peer. */
    const ServiceFlags m_our_services;

    /** Services this peer offered to us. */
    std::atomic<ServiceFlags> m_their_services{NODE_NONE};

    /** Protects misbehavior data members. */
    Mutex m_misbehavior_mutex;
    /** Whether this peer should be disconnected and marked as discouraged. */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Protects block inventory data members. */
    Mutex m_block_inv_mutex;
    /** List of blocks that we'll announce via an `inv` message. */
    std::vector<BlockHash> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    /** Unfiltered list of blocks that we'd like to announce via a `headers`
     *  message. */
    std::vector<BlockHash>
        m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);

    /** The final block hash in the last `inv` we sent; when the peer requests
     *  it, we send another `inv` to trigger the next batch of block hashes. */
    BlockHash m_continuation_block GUARDED_BY(m_block_inv_mutex){};

    /** This peer's reported block height when we connected. */
    std::atomic<int> m_starting_height{-1};

    /** The nonce of the ping we're awaiting a pong for, or 0. */
    std::atomic<uint64_t> m_ping_nonce_sent{0};
    /** When the last ping was sent, or 0 if no ping was ever sent. */
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    /** Whether a ping has been requested by the user. */
    std::atomic<bool> m_ping_queued{false};

    /** The feerate sent to this peer in the most recent `feefilter`
     *  message. */
    Amount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
        Amount::zero()};
    /** Timestamp after which we can send the next `feefilter` message. */
    std::chrono::microseconds m_next_send_feefilter
        GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    struct TxRelay {
        mutable RecursiveMutex m_bloom_filter_mutex;
        /** Whether the peer wishes to receive transaction announcements. */
        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
        /** A bloom filter loaded by the peer to filter the transactions and
         *  announcements we relay to it. */
        std::unique_ptr<CBloomFilter>
            m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex)
                GUARDED_BY(m_bloom_filter_mutex){nullptr};

        /** Txids of the most recently announced transactions. */
        CRollingBloomFilter m_recently_announced_invs GUARDED_BY(
            m_tx_inventory_mutex){INVENTORY_MAX_RECENT_RELAY, 0.000001};

        mutable RecursiveMutex m_tx_inventory_mutex;
        /** Filter of txids the peer already knows about. */
        CRollingBloomFilter m_tx_inventory_known_filter
            GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
        /** Txids we plan to announce to this peer. */
        std::set<TxId> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
        /** Whether the peer requested our complete mempool. */
        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
        /** The last time a `mempool` request was serviced. */
        std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
        /** The next time to send an `inv` message to this peer. */
        std::chrono::microseconds
            m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};

        /** Minimum feerate with which to filter transaction announcements to
         *  this peer, as received in its `feefilter` message. */
        std::atomic<Amount> m_fee_filter_received{Amount::zero()};

        /** Txids whose relay is stalled pending avalanche finalization. */
        StalledTxIdSet
            m_avalanche_stalled_txids GUARDED_BY(m_tx_inventory_mutex);
    };

    /*
     * Initializes a TxRelay struct for this peer. Can be called at most once
     * for a peer.
     */
    TxRelay *SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
        LOCK(m_tx_relay_mutex);
        Assume(!m_tx_relay);
        m_tx_relay = std::make_unique<Peer::TxRelay>();
        return m_tx_relay.get();
    };

    TxRelay *GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };
    const TxRelay *GetTxRelay() const
        EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };

    struct ProofRelay {
        mutable RecursiveMutex m_proof_inventory_mutex;
        std::set<avalanche::ProofId>
            m_proof_inventory_to_send GUARDED_BY(m_proof_inventory_mutex);
        // Prevent sending proof invs if the peer already knows about them
        CRollingBloomFilter m_proof_inventory_known_filter
            GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
        /** Proofids of the most recently announced proofs. */
        CRollingBloomFilter m_recently_announced_proofs GUARDED_BY(
            m_proof_inventory_mutex){INVENTORY_MAX_RECENT_RELAY, 0.000001};
        std::chrono::microseconds m_next_inv_send_time{0};

        /** The radix tree of proofs shared with this peer, if any. */
        RadixTree<const avalanche::Proof, avalanche::ProofRadixTreeAdapter>
            sharedProofs;
        std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
        std::atomic<bool> compactproofs_requested{false};
    };

    /** Proof relay data; only set for avalanche-enabled connections. */
    const std::unique_ptr<ProofRelay> m_proof_relay;

    /** A vector of addresses to send to the peer. */
    std::vector<CAddress>
        m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Probabilistic filter of addresses this peer already knows about. */
    std::unique_ptr<CRollingBloomFilter>
        m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Whether we are participating in address relay with this peer. */
    std::atomic_bool m_addr_relay_enabled{false};
    /** Whether a getaddr request was sent to this peer. */
    bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Guards address sending timers. */
    mutable Mutex m_addr_send_times_mutex;
    /** Time point to send the next addr message to this peer. */
    std::chrono::microseconds
        m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Time point to possibly re-announce our local address to this peer. */
    std::chrono::microseconds
        m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Whether the peer has signaled support for receiving ADDRv2 (BIP155)
     *  messages. */
    std::atomic_bool m_wants_addrv2{false};
    /** Whether this peer has already sent us a getaddr message. */
    bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Guards m_addr_token_bucket. */
    mutable Mutex m_addr_token_bucket_mutex;
    /** Number of addresses that can be processed from this peer. */
    double m_addr_token_bucket GUARDED_BY(m_addr_token_bucket_mutex){1.0};
    /** When m_addr_token_bucket was last updated. */
    std::chrono::microseconds
        m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
            GetTime<std::chrono::microseconds>()};
    /** Total number of addresses that were dropped due to rate limiting. */
    std::atomic<uint64_t> m_addr_rate_limited{0};
    /** Total number of addresses that were processed. */
    std::atomic<uint64_t> m_addr_processed{0};

    /** Whether we've sent this peer a getheaders in response to an inv prior
     *  to initial-headers-sync completing. */
    bool m_inv_triggered_getheaders_before_sync
        GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    /** Protects m_getdata_requests. */
    Mutex m_getdata_requests_mutex;
    /** Work queue of items requested by this peer. */
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    /** Time of the last getheaders message to this peer. */
    NodeClock::time_point m_last_getheaders_timestamp
        GUARDED_BY(NetEventsInterface::g_msgproc_mutex){};

    /** Protects m_headers_sync. */
    Mutex m_headers_sync_mutex;
    /** Headers-sync state for this peer (eg for initial sync, or syncing large
     *  reorgs). */
    std::unique_ptr<HeadersSyncState>
        m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex)
            GUARDED_BY(m_headers_sync_mutex){};

    /** Whether we've sent our peer a sendheaders message. */
    std::atomic<bool> m_sent_sendheaders{false};

    /** When to potentially disconnect the peer for stalling headers
     *  download. */
    std::chrono::microseconds m_headers_sync_timeout
        GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};

    /** Whether this peer wants invs or headers (when possible) for block
     *  announcements. */
    bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
        false};

    explicit Peer(NodeId id, ServiceFlags our_services, bool fRelayProofs)
        : m_id(id), m_our_services{our_services},
          m_proof_relay(fRelayProofs ? std::make_unique<ProofRelay>()
                                     : nullptr) {}

private:
    mutable Mutex m_tx_relay_mutex;

    /** Transaction relay data. May be a nullptr. */
    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
};

using PeerRef = std::shared_ptr<Peer>;

/**
 * Maintain validation-specific state about nodes, protected by cs_main
 * instead of by CNode's own locks. This simplifies asynchronous operation,
 * where processing of incoming data is done after the ProcessMessage call
 * returns, and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The best known block this peer has announced.
    const CBlockIndex *pindexBestKnownBlock{nullptr};
    //! The hash of the last unknown block this peer has announced.
    BlockHash hashLastUnknownBlock{};
    //! The last full block we both have.
    const CBlockIndex *pindexLastCommonBlock{nullptr};
    //! The best header we have sent to our peer.
    const CBlockIndex *pindexBestHeaderSent{nullptr};
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted{false};
    //! When to potentially disconnect the peer for stalling block download.
    std::chrono::microseconds m_stalling_since{0us};
    std::list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading.
    std::chrono::microseconds m_downloading_since{0us};
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload{false};
    /** Whether this peer wants high-bandwidth compact block announcements. */
    bool m_requested_hb_cmpctblocks{false};
    /** Whether this peer will send us compact blocks if we request them. */
    bool m_provides_cmpctblocks{false};

    /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL
     *  logic. */
    struct ChainSyncTimeoutState {
        //! A timeout used for checking whether our peer has sufficient work.
        std::chrono::seconds m_timeout{0s};
        //! A header with the work we require on our peer's chain.
        const CBlockIndex *m_work_header{nullptr};
        //! After the timeout is reached, set to true after sending getheaders.
        bool m_sent_getheaders{false};
        //! Whether this peer is protected from disconnection due to a bad or
        //! slow chain.
        bool m_protect{false};
    };

    ChainSyncTimeoutState m_chain_sync;

    //! Time of the last new block announcement.
    int64_t m_last_block_announcement{0};

    //! Whether this peer is an inbound connection.
    const bool m_is_inbound;

    CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};

class PeerManagerImpl final : public PeerManager {
public:
    PeerManagerImpl(CConnman &connman, AddrMan &addrman, BanMan *banman,
                    ChainstateManager &chainman, CTxMemPool &pool,
                    avalanche::Processor *const avalanche, Options opts);

    /** Overridden from CValidationInterface. */
    void BlockConnected(ChainstateRole role,
                        const std::shared_ptr<const CBlock> &pblock,
                        const CBlockIndex *pindexConnected) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
    void BlockDisconnected(const std::shared_ptr<const CBlock> &block,
                           const CBlockIndex *pindex) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
    void UpdatedBlockTip(const CBlockIndex *pindexNew,
                         const CBlockIndex *pindexFork,
                         bool fInitialDownload) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void BlockChecked(const CBlock &block,
                      const BlockValidationState &state) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void NewPoWValidBlock(const CBlockIndex *pindex,
                          const std::shared_ptr<const CBlock> &pblock) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    /** Implement NetEventsInterface. */
    void InitializeNode(const Config &config, CNode &node,
                        ServiceFlags our_services) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void FinalizeNode(const Config &config, const CNode &node) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest,
                                 !m_headers_presync_mutex);
    bool ProcessMessages(const Config &config, CNode *pfrom,
                         std::atomic<bool> &interrupt) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                 !m_recent_confirmed_transactions_mutex,
                                 !m_most_recent_block_mutex, !cs_proofrequest,
                                 !m_headers_presync_mutex, g_msgproc_mutex);
    bool SendMessages(const Config &config, CNode *pto) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                 !m_recent_confirmed_transactions_mutex,
                                 !m_most_recent_block_mutex, !cs_proofrequest,
                                 g_msgproc_mutex);

    /** Implement PeerManager. */
    void StartScheduledTasks(CScheduler &scheduler) override;
    void CheckForStaleTipAndEvictPeers() override;
    std::optional<std::string>
    FetchBlock(const Config &config, NodeId peer_id,
               const CBlockIndex &block_index) override;
    bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    bool IgnoresIncomingTxs() override { return m_opts.ignore_incoming_txs; }
    void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void RelayTransaction(const TxId &txid) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void RelayProof(const avalanche::ProofId &proofid) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void SetBestHeight(int height) override { m_best_height = height; };
    void UnitTestMisbehaving(NodeId peer_id) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) {
        Misbehaving(*Assert(GetPeerRef(peer_id)), "");
    }
    void ProcessMessage(const Config &config, CNode &pfrom,
                        const std::string &msg_type, CDataStream &vRecv,
                        const std::chrono::microseconds time_received,
                        const std::atomic<bool> &interruptMsgProc) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                 !m_recent_confirmed_transactions_mutex,
                                 !m_most_recent_block_mutex, !cs_proofrequest,
                                 !m_headers_presync_mutex, g_msgproc_mutex);
    void UpdateLastBlockAnnounceTime(NodeId node,
                                     int64_t time_in_seconds) override;

private:
    /**
     * Consider evicting an outbound peer based on the amount of time it has
     * been behind our tip.
     */
    void ConsiderEviction(CNode &pto, Peer &peer,
                          std::chrono::seconds time_in_seconds)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);

    /**
     * If we have extra outbound peers, try to disconnect the one with the
     * oldest block announcement.
     */
    void EvictExtraOutboundPeers(std::chrono::seconds now)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Retrieve unbroadcast transactions from the mempool and reattempt
     * sending them to peers.
     */
    void ReattemptInitialBroadcast(CScheduler &scheduler)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Update the avalanche statistics for all the nodes. */
    void UpdateAvalancheStatistics() const;

    /** Process periodic avalanche network messaging and cleanups. */
    void AvalanchePeriodicNetworking(CScheduler &scheduler) const;

    /**
     * Get a shared pointer to the Peer object. May return an empty shared_ptr
     * if the Peer object can't be found.
     */
    PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /**
     * Get a shared pointer to the Peer object and remove it from m_peer_map.
     * May return an empty shared_ptr if the Peer object can't be found.
     */
    PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Mark a peer as misbehaving, which will cause it to be disconnected. */
    void Misbehaving(Peer &peer, const std::string &message);

    /**
     * Potentially mark a node discouraged based on the contents of a
     * BlockValidationState object.
     */
    void MaybePunishNodeForBlock(NodeId nodeid,
                                 const BlockValidationState &state,
                                 bool via_compact_block,
                                 const std::string &message = "")
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /**
     * Potentially mark a node discouraged based on the contents of a
     * TxValidationState object.
     */
    void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
                              const std::string &message = "")
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /**
     * Maybe disconnect a peer and discourage future connections from its
     * address.
     */
    bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);

    /**
     * Handle a transaction whose result was not
     * MempoolAcceptResult::ResultType::VALID.
     */
    void ProcessInvalidTx(NodeId nodeid, const CTransactionRef &tx,
                          const TxValidationState &result,
                          bool maybe_add_extra_compact_tx)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);

    struct PackageToValidate {
        const Package m_txns;
        const std::vector<NodeId> m_senders;
        /** Construct a 1-parent-1-child package. */
        explicit PackageToValidate(const CTransactionRef &parent,
                                   const CTransactionRef &child,
                                   NodeId parent_sender, NodeId child_sender)
            : m_txns{parent, child}, m_senders{parent_sender, child_sender} {}

        std::string ToString() const {
            Assume(m_txns.size() == 2);
            return strprintf(
                "parent %s (sender=%d) + child %s (sender=%d)",
                m_txns.front()->GetId().ToString(), m_senders.front(),
                m_txns.back()->GetId().ToString(), m_senders.back());
        }
    };

    /**
     * Handle the results of package validation: calls ProcessValidTx and
     * ProcessInvalidTx for the individual transactions.
     */
    void ProcessPackageResult(const PackageToValidate &package_to_validate,
                              const PackageMempoolAcceptResult &package_result)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);

    /**
     * Look for a child of this transaction in the orphanage to form a
     * 1-parent-1-child package.
     */
    std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef &ptx,
                                                     NodeId nodeid)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);

    /**
     * Handle a transaction whose result was
     * MempoolAcceptResult::ResultType::VALID.
     */
    void ProcessValidTx(NodeId nodeid, const CTransactionRef &tx)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);

    /**
     * Reconsider orphan transactions after a parent has been accepted to the
     * mempool. Returns true if there are still orphans in this peer's work
     * set.
     */
    bool ProcessOrphanTx(const Config &config, Peer &peer)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    /** Process a single headers message from a peer. */
    void ProcessHeadersMessage(const Config &config, CNode &pfrom, Peer &peer,
                               std::vector<CBlockHeader> &&headers,
                               bool via_compact_block)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex,
                                 g_msgproc_mutex);

    // Various helpers for headers processing, invoked by
    // ProcessHeadersMessage()
    /** Return true if headers are continuous and have valid proof-of-work. */
    bool CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
                         const Consensus::Params &consensusParams, Peer &peer);
    /** Calculate an anti-DoS work threshold for headers chains. */
    arith_uint256 GetAntiDoSWorkThreshold();
    /**
     * Deal with state tracking and headers sync for peers that send the
     * first batch of a series of unconnecting headers.
     */
    void HandleUnconnectingHeaders(CNode &pfrom, Peer &peer,
                                   const std::vector<CBlockHeader> &headers)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    /** Return true if the headers connect to each other, false otherwise. */
    bool
    CheckHeadersAreContinuous(const std::vector<CBlockHeader> &headers) const;
    /**
     * Continue fetching headers from a peer we are in the middle of a
     * low-work headers sync with.
     */
    bool IsContinuationOfLowWorkHeadersSync(Peer &peer, CNode &pfrom,
                                            std::vector<CBlockHeader> &headers)
        EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex,
                                 !m_headers_presync_mutex, g_msgproc_mutex);
    /**
     * Check the work on a headers chain to be processed, and if insufficient,
     * initiate our anti-DoS headers sync mechanism.
     */
    bool TryLowWorkHeadersSync(Peer &peer, CNode &pfrom,
                               const CBlockIndex *chain_start_header,
                               std::vector<CBlockHeader> &headers)
        EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex,
                                 !m_headers_presync_mutex, g_msgproc_mutex);

    /**
     * Return true if the given header is an ancestor of the current best
     * header or tip.
     */
    bool IsAncestorOfBestHeaderOrTip(const CBlockIndex *header)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Request further headers from this peer with a given locator, if it
     * hasn't recently been asked.
     */
    bool MaybeSendGetHeaders(CNode &pfrom, const CBlockLocator &locator,
                             Peer &peer)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    /** Potentially fetch blocks from this peer upon receipt of new headers. */
    void HeadersDirectFetchBlocks(const Config &config, CNode &pfrom,
                                  const CBlockIndex &last_header);
    /** Update peer state based on a received headers message. */
    void UpdatePeerStateForReceivedHeaders(CNode &pfrom, Peer &peer,
                                           const CBlockIndex &last_header,
                                           bool received_new_header,
                                           bool may_have_more_headers)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void SendBlockTransactions(CNode &pfrom, Peer &peer, const CBlock &block,
                               const BlockTransactionsRequest &req);

    /**
     * Register with the InvRequestTracker that a TX inv has been received
     * from a peer.
     */
    void AddTxAnnouncement(const CNode &node, const TxId &txid,
                           std::chrono::microseconds current_time)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Register with the InvRequestTracker that a PROOF inv has been received
     * from a peer.
     */
    void
    AddProofAnnouncement(const CNode &node, const avalanche::ProofId &proofid,
                         std::chrono::microseconds current_time, bool preferred)
        EXCLUSIVE_LOCKS_REQUIRED(cs_proofrequest);

    /** Send a version message to a peer. */
    void PushNodeVersion(const Config &config, CNode &pnode, const Peer &peer);

    /**
     * Send a ping message every PING_INTERVAL or if requested via RPC. May
     * mark the peer to be disconnected if a ping has timed out.
     */
    void MaybeSendPing(CNode &node_to, Peer &peer,
                       std::chrono::microseconds now);

    /** Send `addr` messages on a regular schedule. */
    void MaybeSendAddr(CNode &node, Peer &peer,
                       std::chrono::microseconds current_time)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /**
     * Send a single `sendheaders` message, after we have completed headers
     * sync with a peer.
     */
    void MaybeSendSendHeaders(CNode &node, Peer &peer)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Send a `feefilter` message. */
    void MaybeSendFeefilter(CNode &node, Peer &peer,
                            std::chrono::microseconds current_time)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /**
     * Relay (gossip) an address to a few randomly chosen nodes.
     *
     * @param[in] originator  The id of the peer that sent us the address. We
     *                        don't relay it back.
     * @param[in] addr        Address to relay.
     * @param[in] fReachable  Whether the address' network is reachable. We
     *                        relay unreachable addresses less.
     */
    void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    FeeFilterRounder
        m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex);

    const CChainParams &m_chainparams;
    CConnman &m_connman;
    AddrMan &m_addrman;
    /** Pointer to this node's banman. May be nullptr; check existence before
     *  dereferencing. */
    BanMan *const m_banman;
    ChainstateManager &m_chainman;
    CTxMemPool &m_mempool;
    avalanche::Processor *const m_avalanche;
    InvRequestTracker<TxId> m_txrequest GUARDED_BY(cs_main);

    Mutex cs_proofrequest;
    InvRequestTracker<avalanche::ProofId>
        m_proofrequest GUARDED_BY(cs_proofrequest);

    /** The height of the best chain. */
    std::atomic<int> m_best_height{-1};

    /** Next time to check for a stale tip. */
    std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};

    const Options m_opts;

    bool RejectIncomingTxs(const CNode &peer) const;

    /**
     * Whether we've completed initial sync yet, for determining when to turn
     * on extra block-relay-only peers.
     */
    bool m_initial_sync_finished GUARDED_BY(cs_main){false};

    /**
     * Protects m_peer_map. This mutex must not be locked while holding a lock
     * on any of the mutexes inside a Peer object.
     */
    mutable Mutex m_peer_mutex;
    /**
     * Map of all Peer objects, keyed by peer id. Once a shared pointer
     * reference is taken, the lock may be released.
     */
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);

    /** Map maintaining per-node validation state. */
    std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);

    /**
     * Get a pointer to a const CNodeState, used when not mutating the
     * CNodeState object.
     */
    const CNodeState *State(NodeId pnode) const
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Get a pointer to a mutable CNodeState. */
    CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};

    /** Number of nodes with fSyncStarted. */
    int nSyncStarted GUARDED_BY(cs_main) = 0;

    /** Hash of the last block we received via INV. */
    BlockHash
        m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};

    /**
     * Sources of received blocks, saved to be able to punish them when
     * processing happens afterwards.
     */
    std::map<BlockHash, std::pair<NodeId, bool>>
        mapBlockSource GUARDED_BY(cs_main);

    /** Number of outbound peers that are protected from disconnection. */
    int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    /** Number of preferable block download peers. */
    int m_num_preferred_download_peers GUARDED_BY(cs_main){0};

    /** Stalling timeout for blocks in IBD. */
    std::atomic<std::chrono::seconds> m_block_stalling_timeout{
        BLOCK_STALLING_TIMEOUT_DEFAULT};

    /**
     * Check whether we already have this txid in the mempool, orphanage,
     * recent-rejects filters, or recently confirmed transactions.
     */
    bool AlreadyHaveTx(const TxId &txid, bool include_reconsiderable)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main,
                                 !m_recent_confirmed_transactions_mutex);

    /**
     * Filter for transactions that were recently rejected by the mempool.
     * These are not rerequested until the chain tip changes, at which point
     * the entire filter is reset.
     */
    CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000,
                                                               0.000'001};

    /** Chain tip at the time m_recent_rejects was last reset. */
    uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

    /**
     * Filter for transactions that were rejected but may be reconsidered as
     * part of a package later.
     */
    CRollingBloomFilter m_recent_rejects_package_reconsiderable
        GUARDED_BY(::cs_main){120'000, 0.000'001};

    /** Filter for transactions that were recently confirmed in a block. */
    mutable Mutex m_recent_confirmed_transactions_mutex;
    CRollingBloomFilter m_recent_confirmed_transactions
        GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};

    /**
     * For sending `inv`s to inbound peers, we use a single (exponentially
     * distributed) timer for all peers. If we used a separate timer for each
     * peer, a spy node could make multiple inbound connections to us to
     * detect when we relayed a transaction to a single peer.
     */
    std::chrono::microseconds
    NextInvToInbounds(std::chrono::microseconds now,
                      std::chrono::seconds average_interval)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    // All of the following cache a recent block, and are protected by
    // m_most_recent_block_mutex
    mutable Mutex m_most_recent_block_mutex;
    std::shared_ptr<const CBlock>
        m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
    std::shared_ptr<const CBlockHeaderAndShortTxIDs>
        m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
    BlockHash m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
    std::unique_ptr<const std::map<TxId, CTransactionRef>>
        m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);

    // Data about the low-work headers synchronization, aggregated from all
    // peers' HeadersSyncStates.
    /** Mutex guarding the other m_headers_presync_* variables. */
    Mutex m_headers_presync_mutex;
    /**
     * A type to represent statistics about a peer's low-work headers sync.
     * The first field is the total verified amount of work in that sync; the
     * second is nullopt during the REDOWNLOAD phase, or {height, timestamp}
     * of the sync's tip during the PRESYNC phase.
     */
    using HeadersPresyncStats =
        std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
    /** Statistics for all peers in low-work headers sync. */
    std::map<NodeId, HeadersPresyncStats>
        m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex){};
    /** The peer with the most-work sync. */
    NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex){-1};
    /** The m_headers_presync_stats improved, and needs signalling. */
    std::atomic_bool m_headers_presync_should_signal{false};

    /** Height of the highest block announced using BIP 152 high-bandwidth
     *  mode. */
    int m_highest_fast_announce GUARDED_BY(::cs_main){0};

    /** True if we know this block is in flight from any peer. */
    bool IsBlockRequested(const BlockHash &hash)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** True if this block is in flight from an outbound peer. */
    bool IsBlockRequestedFromOutbound(const BlockHash &hash)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Remove this block from our tracked requested blocks. Called if the
     * block has been received or the request timed out. If "from_peer" is
     * specified, only remove the block if it is in flight from that peer (so
     * one peer's network traffic doesn't affect another's state).
     */
    void RemoveBlockRequest(const BlockHash &hash,
                            std::optional<NodeId> from_peer)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Mark a block as in flight. Returns false, still setting pit, if the
     * block was already in flight from the same peer.
     */
    bool BlockRequested(const Config &config, NodeId nodeid,
                        const CBlockIndex &block,
                        std::list<QueuedBlock>::iterator **pit = nullptr)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Update pindexLastCommonBlock and add not-in-flight missing successors
     * to vBlocks, until it has at most count entries.
     */
    void FindNextBlocksToDownload(const Peer &peer, unsigned int count,
                                  std::vector<const CBlockIndex *> &vBlocks,
                                  NodeId &nodeStaller)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Request blocks for the background chainstate, if one is in use. */
    void TryDownloadingHistoricalBlocks(
        const Peer &peer, unsigned int count,
        std::vector<const CBlockIndex *> &vBlocks, const CBlockIndex *from_tip,
        const CBlockIndex *target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Internal helper for the two functions above: walk from pindexWalk
     * towards the peer's best known block, appending blocks that are neither
     * downloaded nor in flight, and no higher than nWindowEnd.
     */
    void FindNextBlocks(std::vector<const CBlockIndex *> &vBlocks,
                        const Peer &peer, CNodeState *state,
                        const CBlockIndex *pindexWalk, unsigned int count,
                        int nWindowEnd, const CChain *activeChain = nullptr,
                        NodeId *nodeStaller = nullptr)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Block hash to the peers (and queue entries) downloading it. */
    typedef std::multimap<BlockHash,
                          std::pair<NodeId, std::list<QueuedBlock>::iterator>>
        BlockDownloadMap;
    BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);

    /** When our tip was last updated. */
    std::atomic<std::chrono::seconds> m_last_tip_update{0s};

    /**
     * Determine whether or not a peer can request a transaction, and return
     * it (or nullptr if not found or not allowed).
     */
    CTransactionRef FindTxForGetData(const Peer &peer, const TxId &txid,
                                     const std::chrono::seconds mempool_req,
                                     const std::chrono::seconds now)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
                                 NetEventsInterface::g_msgproc_mutex);

    void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
                        const std::atomic<bool> &interruptMsgProc)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
                                 peer.m_getdata_requests_mutex,
                                 NetEventsInterface::g_msgproc_mutex);

    /** Process a new block. Perform any post-processing housekeeping. */
    void ProcessBlock(const Config &config, CNode &node,
                      const std::shared_ptr<const CBlock> &block,
                      bool force_processing, bool min_pow_checked);

    /**
     * Consider a peer for announcing blocks using compact block encoding
     * (BIP 152 high-bandwidth mode).
     */
    void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** The peers we ask for compact block announcements. */
    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    /** Number of peers from which we're downloading blocks. */
    int m_peers_downloading_from GUARDED_BY(cs_main) = 0;

    void AddToCompactExtraTransactions(const CTransactionRef &tx)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /**
     * Orphan/conflicted/etc transactions that are kept for compact block
     * reconstruction. The last entries are kept in a ring buffer.
     */
    std::vector<std::pair<TxHash, CTransactionRef>>
        vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
    /** Offset into vExtraTxnForCompact to insert the next tx. */
    size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;

    /**
     * Check whether the last unknown block a peer advertised is now known.
     */
    void ProcessBlockAvailability(NodeId nodeid)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Update tracking information about which blocks a peer is assumed to
     *  have. */
    void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * To prevent fingerprinting attacks, only send blocks/headers outside of
     * the active chain if they are no more than a month older (both in time,
     * and in best equivalent proof of work) than the best header chain we
     * know about.
     */
    bool BlockRequestAllowed(const CBlockIndex *pindex)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool AlreadyHaveBlock(const BlockHash &block_hash)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool AlreadyHaveProof(const avalanche::ProofId &proofid);
    void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
                             const CInv &inv)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    /**
     * Validation logic for compact filters request handling.
     *
     * May disconnect from the peer in the case of a bad request.
     *
     * @param[in]  node            The node that we received the request from
     * @param[in]  peer            The peer that we received the request from
     * @param[in]  filter_type     The filter type the request is for
     * @param[in]  start_height    The start height for the request
     * @param[in]  stop_hash       The stop_hash for the request
     * @param[in]  max_height_diff The maximum number of items permitted to
     *                             request, as specified in BIP 157
     * @param[out] stop_index      The CBlockIndex for the stop_hash block, if
     *                             the request can be serviced
     * @param[out] filter_index    The filter index, if the request can be
     *                             serviced
     * @return True if the request can be serviced.
     */
    bool PrepareBlockFilterRequest(CNode &node, Peer &peer,
                                   BlockFilterType filter_type,
                                   uint32_t start_height,
                                   const BlockHash &stop_hash,
                                   uint32_t max_height_diff,
                                   const CBlockIndex *&stop_index,
                                   BlockFilterIndex *&filter_index);

    /**
     * Handle a cfilters request. May disconnect from the peer in the case of
     * a bad request.
     */
    void ProcessGetCFilters(CNode &node, Peer &peer, CDataStream &vRecv);
    /**
     * Handle a cfheaders request. May disconnect from the peer in the case of
     * a bad request.
     */
    void ProcessGetCFHeaders(CNode &node, Peer &peer, CDataStream &vRecv);
    /**
     * Handle a getcfcheckpt request. May disconnect from the peer in the case
     * of a bad request.
     */
    void ProcessGetCFCheckPt(CNode &node, Peer &peer, CDataStream &vRecv);

    /** Decide a response for an Avalanche poll about the given block. */
    uint32_t GetAvalancheVoteForBlock(const BlockHash &hash) const
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Decide a response for an Avalanche poll about the given transaction. */
    uint32_t GetAvalancheVoteForTx(const avalanche::Processor &avalanche,
                                   const TxId &id) const
        EXCLUSIVE_LOCKS_REQUIRED(!m_mempool.cs,
                                 !m_recent_confirmed_transactions_mutex);

    /**
     * Checks if address relay is permitted with peer. If needed, initializes
     * the m_addr_known bloom filter and sets m_addr_relay_enabled to true.
     *
     * @return True if address relay is enabled with peer
     *         False if address relay is disallowed
     */
    bool SetupAddressRelay(const CNode &node, Peer &peer)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void AddAddressKnown(Peer &peer, const CAddress &addr)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    void PushAddress(Peer &peer, const CAddress &addr)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /**
     * Manage reception of an avalanche proof.
     *
     * @return True if the proof was processed.
     */
    bool ReceivedAvalancheProof(CNode &node, Peer &peer,
                                const avalanche::ProofRef &proof)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest);

    avalanche::ProofRef FindProofForGetData(const Peer &peer,
                                            const avalanche::ProofId &proofid,
                                            const std::chrono::seconds now);

    bool isPreferredDownloadPeer(const CNode &pfrom);
};

const CNodeState *PeerManagerImpl::State(NodeId pnode) const
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
    if (it == m_node_states.end()) {
        return nullptr;
    }

    return &it->second;
}

CNodeState *PeerManagerImpl::State(NodeId pnode)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    return const_cast<CNodeState *>(std::as_const(*this).State(pnode));
}

/**
 * Whether the peer supports the address. For example, a peer that does not
 * implement BIP155 cannot receive Tor v3 addresses because it requires
 * ADDRv2 (BIP155) encoding.
 */
static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {
    return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
}

void PeerManagerImpl::AddAddressKnown(Peer &peer, const CAddress &addr) {
    assert(peer.m_addr_known);
    peer.m_addr_known->insert(addr.GetKey());
}

void PeerManagerImpl::PushAddress(Peer &peer, const CAddress &addr) {
    // Known checking here is only to save space from duplicates.
    // Before sending, we'll filter it again for known addresses that were
    // added after addresses were pushed.
    assert(peer.m_addr_known);
    if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
        IsAddrCompatible(peer, addr)) {
        if (peer.m_addrs_to_send.size() >= m_opts.max_addr_to_send) {
            peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] =
                addr;
        } else {
            peer.m_addrs_to_send.push_back(addr);
        }
    }
}
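
// Note (illustrative, not in the original source): once m_addrs_to_send holds
// m_opts.max_addr_to_send entries, each new address overwrites a uniformly
// random existing slot rather than being dropped, so a late flood of
// addresses cannot deterministically displace the earlier ones before the
// next addr message is flushed.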

static void AddKnownTx(Peer &peer, const TxId &txid) {
    auto tx_relay = peer.GetTxRelay();
    if (!tx_relay) {
        return;
    }

    LOCK(tx_relay->m_tx_inventory_mutex);
    tx_relay->m_tx_inventory_known_filter.insert(txid);
}

static void AddKnownProof(Peer &peer, const avalanche::ProofId &proofid) {
    if (peer.m_proof_relay != nullptr) {
        LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
        peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
    }
}

bool PeerManagerImpl::isPreferredDownloadPeer(const CNode &pfrom) {
    LOCK(cs_main);
    const CNodeState *state = State(pfrom.GetId());
    return state && state->fPreferredDownload;
}

/** Whether this peer can serve us blocks. */
static bool CanServeBlocks(const Peer &peer) {
    return peer.m_their_services & (NODE_NETWORK | NODE_NETWORK_LIMITED);
}

/**
 * Whether this peer can only serve a limited recent portion of the chain,
 * i.e. it signals NODE_NETWORK_LIMITED but not NODE_NETWORK.
 */
static bool IsLimitedPeer(const Peer &peer) {
    return (!(peer.m_their_services & NODE_NETWORK) &&
            (peer.m_their_services & NODE_NETWORK_LIMITED));
}

std::chrono::microseconds
PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
                                   std::chrono::seconds average_interval) {
    if (m_next_inv_to_inbounds.load() < now) {
        // If this function were called from multiple threads simultaneously,
        // it would be possible that both update the next send variable and
        // return a different result to their caller. This is not possible in
        // practice as only the net processing thread invokes this function.
        m_next_inv_to_inbounds =
            now + m_rng.rand_exp_duration(average_interval);
    }
    return m_next_inv_to_inbounds;
}
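
// Note (illustrative, not in the original source): rand_exp_duration draws an
// exponentially distributed delay whose mean is average_interval, so inbound
// inv batches follow Poisson timing. Because every inbound peer shares the
// single m_next_inv_to_inbounds timestamp, a spy opening several inbound
// connections sees announcements at the same instant on each of them and
// cannot use relative timing to locate a transaction's first hop.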

bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
    return mapBlocksInFlight.count(hash);
}

bool PeerManagerImpl::IsBlockRequestedFromOutbound(const BlockHash &hash) {
    for (auto range = mapBlocksInFlight.equal_range(hash);
         range.first != range.second; range.first++) {
        auto [nodeid, block_it] = range.first->second;
        CNodeState &nodestate = *Assert(State(nodeid));
        if (!nodestate.m_is_inbound) {
            return true;
        }
    }

    return false;
}

void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash,
                                         std::optional<NodeId> from_peer) {
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
        // Block was not requested from any peer
        return;
    }

    // We should not have requested too many of this block
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    while (range.first != range.second) {
        auto [node_id, list_it] = range.first->second;

        if (from_peer && *from_peer != node_id) {
            range.first++;
            continue;
        }

        CNodeState &state = *Assert(State(node_id));

        if (state.vBlocksInFlight.begin() == list_it) {
            // First block on the queue was received, update the start download
            // time for the next one
            state.m_downloading_since =
                std::max(state.m_downloading_since,
                         GetTime<std::chrono::microseconds>());
        }
        state.vBlocksInFlight.erase(list_it);

        if (state.vBlocksInFlight.empty()) {
            // Last validated block on the queue for this peer was received.
            m_peers_downloading_from--;
        }
        state.m_stalling_since = 0us;

        range.first = mapBlocksInFlight.erase(range.first);
    }
}

bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
                                     const CBlockIndex &block,
                                     std::list<QueuedBlock>::iterator **pit) {
    const BlockHash &hash{block.GetBlockHash()};

    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    // Short-circuit most stuff in case it is from the same node
    for (auto range = mapBlocksInFlight.equal_range(hash);
         range.first != range.second; range.first++) {
        if (range.first->second.first == nodeid) {
            if (pit) {
                *pit = &range.first->second.second;
            }
            return false;
        }
    }

    // Make sure it's not being fetched already from same peer.
    RemoveBlockRequest(hash, nodeid);

    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
        state->vBlocksInFlight.end(),
        {&block, std::unique_ptr<PartiallyDownloadedBlock>(
                     pit ? new PartiallyDownloadedBlock(config, &m_mempool)
                         : nullptr)});
    if (state->vBlocksInFlight.size() == 1) {
        // We're starting a block download (batch) from this peer.
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    }

    auto itInFlight = mapBlocksInFlight.insert(
        std::make_pair(hash, std::make_pair(nodeid, it)));

    if (pit) {
        *pit = &itInFlight->second.second;
    }

    return true;
}

void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
    AssertLockHeld(cs_main);

    // When in -blocksonly mode, never request high-bandwidth mode from peers.
    // Our mempool will not contain the transactions necessary to reconstruct
    // the compact block.
    if (m_opts.ignore_incoming_txs) {
        return;
    }

    CNodeState *nodestate = State(nodeid);
    if (!nodestate) {
        LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
        return;
    }
    if (!nodestate->m_provides_cmpctblocks) {
        return;
    }
    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
         it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
        CNodeState *state = State(*it);
        if (state != nullptr && !state->m_is_inbound) {
            ++num_outbound_hb_peers;
        }
    }
    if (nodestate->m_is_inbound) {
        // If we're adding an inbound HB peer, make sure we're not removing
        // our last outbound HB peer in the process.
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
            num_outbound_hb_peers == 1) {
            CNodeState *remove_node =
                State(lNodesAnnouncingHeaderAndIDs.front());
            if (remove_node != nullptr && !remove_node->m_is_inbound) {
                // Put the HB outbound peer in the second slot, so that it
                // doesn't get removed.
                std::swap(lNodesAnnouncingHeaderAndIDs.front(),
                          *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
            }
        }
    }
    m_connman.ForNode(nodeid, [this](CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(
                                  ::cs_main) {
        AssertLockHeld(::cs_main);
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            m_connman.ForNode(
                lNodesAnnouncingHeaderAndIDs.front(), [this](CNode *pnodeStop) {
                    m_connman.PushMessage(
                        pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
                                       .Make(NetMsgType::SENDCMPCT,
                                             /*high_bandwidth=*/false,
                                             /*version=*/CMPCTBLOCKS_VERSION));
                    // save BIP152 bandwidth state: we select peer to be
                    // low-bandwidth
                    pnodeStop->m_bip152_highbandwidth_to = false;
                    return true;
                });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        m_connman.PushMessage(pfrom,
                              CNetMsgMaker(pfrom->GetCommonVersion())
                                  .Make(NetMsgType::SENDCMPCT,
                                        /*high_bandwidth=*/true,
                                        /*version=*/CMPCTBLOCKS_VERSION));
        // save BIP152 bandwidth state: we select peer to be high-bandwidth
        pfrom->m_bip152_highbandwidth_to = true;
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
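
// Illustrative walkthrough (not part of the original source): suppose the
// high-bandwidth list holds {A, B, C} (oldest first) and a new peer D
// qualifies. A is sent SENDCMPCT(high_bandwidth=false) and popped from the
// front; D is sent SENDCMPCT(high_bandwidth=true) and appended, leaving
// {B, C, D}. If D were inbound and the front peer were our only outbound HB
// peer, that peer would first be swapped into the second slot so that an
// inbound one is demoted instead.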

bool PeerManagerImpl::TipMayBeStale() {
    AssertLockHeld(cs_main);
    const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
    if (m_last_tip_update.load() == 0s) {
        m_last_tip_update = GetTime<std::chrono::seconds>();
    }
    return m_last_tip_update.load() <
               GetTime<std::chrono::seconds>() -
                   std::chrono::seconds{consensusParams.nPowTargetSpacing *
                                        3} &&
           mapBlocksInFlight.empty();
}
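
// Note (illustrative, not in the original source): with a 10-minute target
// spacing, nPowTargetSpacing * 3 is 30 minutes, so the tip may be considered
// stale only if no block was connected for half an hour *and* no block
// download is currently in flight.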

bool PeerManagerImpl::CanDirectFetch() {
    return m_chainman.ActiveChain().Tip()->Time() >
           GetAdjustedTime() -
               m_chainparams.GetConsensus().PowTargetSpacing() * 20;
}

static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    if (state->pindexBestKnownBlock &&
        pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
        return true;
    }
    if (state->pindexBestHeaderSent &&
        pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
        return true;
    }
    return false;
}

void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (!state->hashLastUnknownBlock.IsNull()) {
        const CBlockIndex *pindex =
            m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
        if (pindex && pindex->nChainWork > 0) {
            if (state->pindexBestKnownBlock == nullptr ||
                pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                state->pindexBestKnownBlock = pindex;
            }
            state->hashLastUnknownBlock.SetNull();
        }
    }
}

void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
                                              const BlockHash &hash) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    ProcessBlockAvailability(nodeid);

    const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
    if (pindex && pindex->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == nullptr ||
            pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
            state->pindexBestKnownBlock = pindex;
        }
    } else {
        // An unknown block was announced; just assume that the latest one is
        // the best one.
        state->hashLastUnknownBlock = hash;
    }
}
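
// Illustrative sequence (not part of the original source):
//  1. A peer announces block hash H for which we have no index entry yet:
//     UpdateBlockAvailability() stores H in hashLastUnknownBlock.
//  2. We later accept a headers chain containing H.
//  3. The next ProcessBlockAvailability() call finds H in the block index
//     with nChainWork > 0, promotes it to pindexBestKnownBlock if it has at
//     least as much work as the previous best, and clears
//     hashLastUnknownBlock.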

// Logic for calculating which blocks to download from a given peer, given
// our current tip.
void PeerManagerImpl::FindNextBlocksToDownload(
    const Peer &peer, unsigned int count,
    std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
    if (count == 0) {
        return;
    }

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(peer.m_id);

    if (state->pindexBestKnownBlock == nullptr ||
        state->pindexBestKnownBlock->nChainWork <
            m_chainman.ActiveChain().Tip()->nChainWork ||
        state->pindexBestKnownBlock->nChainWork <
            m_chainman.MinimumChainWork()) {
        // This peer has nothing interesting.
        return;
    }

    // When we sync with AssumeUtxo and discover the snapshot is not in the
    // peer's best chain, abort: We can't reorg to this chain due to missing
    // undo data until the background sync has finished, so downloading blocks
    // from it would be futile.
    const CBlockIndex *snap_base{m_chainman.GetSnapshotBaseBlock()};
    if (snap_base && state->pindexBestKnownBlock->GetAncestor(
                         snap_base->nHeight) != snap_base) {
        LogPrint(BCLog::NET,
                 "Not downloading blocks from peer=%d, which doesn't have the "
                 "snapshot block in its best chain.\n",
                 peer.m_id);
        return;
    }

    // Bootstrap quickly by guessing a parent of our best tip is the forking
    // point. Guessing wrong in either direction is not a problem. Also reset
    // pindexLastCommonBlock after a snapshot was loaded, so that blocks after
    // the snapshot will be prioritised for download.
    if (state->pindexLastCommonBlock == nullptr ||
        (snap_base &&
         state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
        state->pindexLastCommonBlock =
            m_chainman
                .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
                                        m_chainman.ActiveChain().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be
    // an ancestor of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(
        state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
        return;
    }

    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more
    // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
    // common with this peer. The +1 is so we can detect stalling, namely if we
    // would be able to download that next block if the window were 1 larger.
    int nWindowEnd =
        state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;

    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd,
                   &m_chainman.ActiveChain(), &nodeStaller);
}
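
// Worked example (illustrative, not in the original source): if the last
// common block with a peer is at height 100000, the window covers heights
// 100001..101024 (BLOCK_DOWNLOAD_WINDOW = 1024), and at most
// MAX_BLOCKS_IN_TRANSIT_PER_PEER (16) of those are requested from this peer
// at once. If nothing can be fetched but we could make progress with a window
// one block larger, the peer from which that first missing block is in flight
// is reported through nodeStaller.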

void PeerManagerImpl::TryDownloadingHistoricalBlocks(
    const Peer &peer, unsigned int count,
    std::vector<const CBlockIndex *> &vBlocks, const CBlockIndex *from_tip,
    const CBlockIndex *target_block) {
    Assert(from_tip);
    Assert(target_block);

    if (vBlocks.size() >= count) {
        return;
    }

    vBlocks.reserve(count);
    CNodeState *state = Assert(State(peer.m_id));

    if (state->pindexBestKnownBlock == nullptr ||
        state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) !=
            target_block) {
        // This peer can't provide us the complete series of blocks leading up
        // to the assumeutxo snapshot base.
        //
        // Presumably this peer's chain has less work than our ActiveChain()'s
        // tip, or else we will eventually crash when we try to reorg to it.
        // Let other logic deal with whether we disconnect this peer.
        //
        // TODO at some point in the future, we might choose to request what
        // blocks this peer does have from the historical chain, despite it not
        // having a complete history beneath the snapshot base.
        return;
    }

    FindNextBlocks(vBlocks, peer, state, from_tip, count,
                   std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW,
                                 target_block->nHeight));
}

void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex *> &vBlocks,
                                     const Peer &peer, CNodeState *state,
                                     const CBlockIndex *pindexWalk,
                                     unsigned int count, int nWindowEnd,
                                     const CChain *activeChain,
                                     NodeId *nodeStaller) {
    std::vector<const CBlockIndex *> vToFetch;
    int nMaxHeight =
        std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed)
        // successors of pindexWalk (towards pindexBestKnownBlock) into
        // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
        // expensive as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(
            pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding
        // the ones that are not yet downloaded and not in flight to vBlocks.
        // In the meantime, update pindexLastCommonBlock as long as all
        // ancestors are already downloaded, or if it's already part of our
        // chain (and therefore don't need it even if pruned).
        for (const CBlockIndex *pindex : vToFetch) {
            if (!pindex->IsValid(BlockValidity::TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (pindex->nStatus.hasData() ||
                (activeChain && activeChain->Contains(pindex))) {
                if (activeChain && pindex->HaveNumChainTxs()) {
                    state->pindexLastCommonBlock = pindex;
                }
            } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                        // We aren't able to fetch anything, but we would be if
                        // the download window was one larger.
                        if (nodeStaller) {
                            *nodeStaller = waitingfor;
                        }
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor =
                    mapBlocksInFlight.lower_bound(pindex->GetBlockHash())
                        ->second.first;
            }
        }
    }
}

} // namespace

/**
 * Whether this peer already has too many announcements pending; used to bound
 * the memory consumed by the request trackers.
 */
template <class InvId>
static bool
TooManyAnnouncements(const CNode &node,
                     const InvRequestTracker<InvId> &requestTracker,
                     const DataRequestParameters &requestParams) {
    return !node.HasPermission(
               requestParams.bypass_request_limits_permissions) &&
           requestTracker.Count(node.GetId()) >=
               requestParams.max_peer_announcements;
}

/**
 * Compute the request time for this announcement: the current time, plus
 * nonpref_peer_delay for announcements from non-preferred connections, plus
 * overloaded_peer_delay for peers which already have
 * max_peer_request_in_flight requests in flight (and lack the permission to
 * bypass this limit).
 */
template <class InvId>
static std::chrono::microseconds
ComputeRequestTime(const CNode &node,
                   const InvRequestTracker<InvId> &requestTracker,
                   const DataRequestParameters &requestParams,
                   std::chrono::microseconds current_time, bool preferred) {
    auto delay = std::chrono::microseconds{0};

    if (!preferred) {
        delay += requestParams.nonpref_peer_delay;
    }

    if (!node.HasPermission(requestParams.bypass_request_limits_permissions) &&
        requestTracker.CountInFlight(node.GetId()) >=
            requestParams.max_peer_request_in_flight) {
        delay += requestParams.overloaded_peer_delay;
    }

    return current_time + delay;
}
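
// Worked example (illustrative, not in the original source): under
// TX_REQUEST_PARAMS, a txid announced by a non-preferred peer that already
// has 100 requests in flight is scheduled at current_time + 2s
// (nonpref_peer_delay) + 2s (overloaded_peer_delay), so preferred, responsive
// peers get a four-second head start for the same announcement.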

void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
                                      const Peer &peer) {
    uint64_t my_services{peer.m_our_services};
    const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
    uint64_t nonce = pnode.GetLocalNonce();
    const int nNodeStartingHeight{m_best_height};
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;
    uint64_t extraEntropy = pnode.GetLocalExtraEntropy();

    CService addr_you =
        addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible()
            ? addr
            : CService();
    uint64_t your_services{addr.nServices};

    const bool tx_relay{!RejectIncomingTxs(pnode)};
    m_connman.PushMessage(
        // your_services, addr_you: Together the pre-version-31402
        // serialization of CAddress "addrYou" (without nTime)
        // my_services, CService(): Together the pre-version-31402
        // serialization of CAddress "addrMe" (without nTime)
        &pnode,
        CNetMsgMaker(INIT_PROTO_VERSION)
            .Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
                  your_services, WithParams(CNetAddr::V1, addr_you),
                  my_services, WithParams(CNetAddr::V1, CService{}), nonce,
                  userAgent(config), nNodeStartingHeight, tx_relay,
                  extraEntropy));

    if (fLogIPs) {
        LogPrint(BCLog::NET,
                 "send version message: version %d, blocks=%d, them=%s, "
                 "txrelay=%d, peer=%d\n",
                 PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToString(),
                 tx_relay, nodeid);
    } else {
        LogPrint(BCLog::NET,
                 "send version message: version %d, blocks=%d, "
                 "txrelay=%d, peer=%d\n",
                 PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
    }
}

void PeerManagerImpl::AddTxAnnouncement(
    const CNode &node, const TxId &txid,
    std::chrono::microseconds current_time) {
    // For m_txrequest and state
    AssertLockHeld(cs_main);

    if (TooManyAnnouncements(node, m_txrequest, TX_REQUEST_PARAMS)) {
        return;
    }

    const bool preferred = isPreferredDownloadPeer(node);
    auto reqtime = ComputeRequestTime(node, m_txrequest, TX_REQUEST_PARAMS,
                                      current_time, preferred);

    m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
}
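
// Illustrative flow (not part of the original source): when "inv tx T"
// arrives from peer P, message processing ends up calling
// AddTxAnnouncement(P, T, now). The announcement is dropped once P already
// tracks 5000 txids (max_peer_announcements); otherwise it enters m_txrequest
// with the request time computed above, and a later SendMessages() pass turns
// due announcements into getdata requests.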

void PeerManagerImpl::AddProofAnnouncement(
    const CNode &node, const avalanche::ProofId &proofid,
    std::chrono::microseconds current_time, bool preferred) {
    // For m_proofrequest
    AssertLockHeld(cs_proofrequest);

    if (TooManyAnnouncements(node, m_proofrequest, PROOF_REQUEST_PARAMS)) {
        return;
    }

    auto reqtime = ComputeRequestTime(
        node, m_proofrequest, PROOF_REQUEST_PARAMS, current_time, preferred);

    m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
}

void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node,
                                                  int64_t time_in_seconds) {
    LOCK(cs_main);
    CNodeState *state = State(node);
    if (state) {
        state->m_last_block_announcement = time_in_seconds;
    }
}

void PeerManagerImpl::InitializeNode(const Config &config, CNode &node,
                                     ServiceFlags our_services) {
    NodeId nodeid = node.GetId();
    {
        LOCK(cs_main);
        m_node_states.emplace_hint(m_node_states.end(),
                                   std::piecewise_construct,
                                   std::forward_as_tuple(nodeid),
                                   std::forward_as_tuple(node.IsInboundConn()));
        assert(m_txrequest.Count(nodeid) == 0);
    }

    if (NetPermissions::HasFlag(node.m_permission_flags,
                                NetPermissionFlags::BloomFilter)) {
        our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
    }

    PeerRef peer = std::make_shared<Peer>(nodeid, our_services, !!m_avalanche);
    {
        LOCK(m_peer_mutex);
        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
    }
    if (!node.IsInboundConn()) {
        PushNodeVersion(config, node, *peer);
    }
}

void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
    std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();

    for (const TxId &txid : unbroadcast_txids) {
        // Sanity check: all unbroadcast txns should exist in the mempool
        if (m_mempool.exists(txid)) {
            RelayTransaction(txid);
        } else {
            m_mempool.RemoveUnbroadcastTx(txid, true);
        }
    }

    if (m_avalanche) {
        // Get and sanitize the list of proofids to broadcast. The RelayProof
        // call is done in a second loop to avoid locking cs_vNodes while
        // cs_peerManager is locked which would cause a potential deadlock due
        // to reversed lock order.
        auto unbroadcasted_proofids =
            m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                auto unbroadcasted_proofids = pm.getUnbroadcastProofs();

                auto it = unbroadcasted_proofids.begin();
                while (it != unbroadcasted_proofids.end()) {
                    // Sanity check: all unbroadcast proofs should be bound to
                    // a peer in the peermanager
                    if (!pm.isBoundToPeer(*it)) {
                        pm.removeUnbroadcastProof(*it);
                        it = unbroadcasted_proofids.erase(it);
                        continue;
                    }

                    ++it;
                }

                return unbroadcasted_proofids;
            });

        // Remaining proofids are the ones to broadcast
        for (const auto &proofid : unbroadcasted_proofids) {
            RelayProof(proofid);
        }
    }

    // Schedule next run for 10-15 minutes in the future.
    // We add randomness on every cycle to avoid the possibility of P2P
    // fingerprinting.
    const auto reattemptBroadcastInterval =
        10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                              reattemptBroadcastInterval);
}
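
// Note (illustrative, not in the original source): 10min + randrange(5min)
// yields a uniformly random interval in [10min, 15min), so the rebroadcast
// schedule differs across nodes and cannot easily be used to fingerprint the
// transaction originator.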

void PeerManagerImpl::UpdateAvalancheStatistics() const {
    m_connman.ForEachNode([](CNode *pnode) {
        pnode->updateAvailabilityScore(AVALANCHE_STATISTICS_DECAY_FACTOR);
    });
}
2414
2415void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
2416 const auto now = GetTime<std::chrono::seconds>();
2417 std::vector<NodeId> avanode_ids;
2418 bool fQuorumEstablished;
2419 bool fShouldRequestMoreNodes;
2420
2421 if (!m_avalanche) {
2422 // Not enabled or not ready yet, retry later
2423 goto scheduleLater;
2424 }
2425
2426 m_avalanche->sendDelayedAvahello();
2427
2428 fQuorumEstablished = m_avalanche->isQuorumEstablished();
2429 fShouldRequestMoreNodes =
2430 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
2431 return pm.shouldRequestMoreNodes();
2432 });
2433
2434 m_connman.ForEachNode([&](CNode *pnode) {
2435 // Build a list of the avalanche peers nodeids
2436 if (pnode->m_avalanche_enabled) {
2437 avanode_ids.push_back(pnode->GetId());
2438 }
2439
2440 PeerRef peer = GetPeerRef(pnode->GetId());
2441 if (peer == nullptr) {
2442 return;
2443 }
2444 // If a proof radix tree timed out, cleanup
2445 if (peer->m_proof_relay &&
2446 now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
2448 peer->m_proof_relay->sharedProofs = {};
2449 }
2450 });
2451
2452 if (avanode_ids.empty()) {
2453 // No node is available for messaging, retry later
2454 goto scheduleLater;
2455 }
2456
2457 Shuffle(avanode_ids.begin(), avanode_ids.end(), FastRandomContext());
2458
2459 // Request avalanche addresses from our peers
2460 for (NodeId avanodeId : avanode_ids) {
2461 const bool sentGetavaaddr =
2462 m_connman.ForNode(avanodeId, [&](CNode *pavanode) {
2463 if (!fQuorumEstablished || !pavanode->IsInboundConn()) {
2464 m_connman.PushMessage(
2465 pavanode, CNetMsgMaker(pavanode->GetCommonVersion())
2466 .Make(NetMsgType::GETAVAADDR));
2467 PeerRef peer = GetPeerRef(avanodeId);
2468 WITH_LOCK(peer->m_addr_token_bucket_mutex,
2469 peer->m_addr_token_bucket +=
2470 m_opts.max_addr_to_send);
2471 return true;
2472 }
2473 return false;
2474 });
2475
2476 // If we have no reason to believe that we need more nodes, only request
2477 // addresses from one of our peers.
2478 if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
2479 break;
2480 }
2481 }
2482
2483 if (m_chainman.IsInitialBlockDownload()) {
2484 // Don't request proofs while in IBD. We're likely to orphan them
2485 // because we don't have the UTXOs.
2486 goto scheduleLater;
2487 }
2488
2489 // If we never had an avaproofs message yet, be kind and only request to a
2490 // subset of our peers as we expect a ton of avaproofs message in the
2491 // process.
2492 if (m_avalanche->getAvaproofsNodeCounter() == 0) {
2493 avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
2494 }
2495
2496 for (NodeId nodeid : avanode_ids) {
2497 // Send a getavaproofs to all of our peers
2498 m_connman.ForNode(nodeid, [&](CNode *pavanode) {
2499 PeerRef peer = GetPeerRef(nodeid);
2500 if (peer->m_proof_relay) {
2501 m_connman.PushMessage(pavanode,
2502 CNetMsgMaker(pavanode->GetCommonVersion())
2503 .Make(NetMsgType::GETAVAPROOFS));
2504
2505 peer->m_proof_relay->compactproofs_requested = true;
2506 }
2507 return true;
2508 });
2509 }
2510
2511scheduleLater:
2512 // Schedule next run for 2-5 minutes in the future.
2513 // We add randomness on every cycle to avoid the possibility of P2P
2514 // fingerprinting.
2515 const auto avalanchePeriodicNetworkingInterval =
2516 2min + FastRandomContext().randrange<std::chrono::milliseconds>(3min);
2517 scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2518 avalanchePeriodicNetworkingInterval);
2519}
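// The peer-selection logic above uses shuffle-then-truncate to pick a
// uniform random subset: Shuffle() randomizes avanode_ids, then resize(3)
// keeps the first three when no avaproofs message has been received yet.
// A minimal sketch of that pattern (SelectRandomSubset is illustrative):
//
//     #include <algorithm>
//     #include <random>
//     #include <vector>
//     std::vector<NodeId> SelectRandomSubset(std::vector<NodeId> ids,
//                                            size_t max_count) {
//         static std::mt19937_64 rng{std::random_device{}()};
//         std::shuffle(ids.begin(), ids.end(), rng);
//         // After the shuffle, every size-max_count subset is equally
//         // likely, so truncation is an unbiased selection.
//         ids.resize(std::min(ids.size(), max_count));
//         return ids;
//     }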
2520
2521void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
2522 NodeId nodeid = node.GetId();
2523 {
2524 LOCK(cs_main);
2525 {
2526 // We remove the PeerRef from g_peer_map here, but we don't always
2527 // destruct the Peer. Sometimes another thread is still holding a
2528 // PeerRef, so the refcount is >= 1. Be careful not to do any
2529 // processing here that assumes Peer won't be changed before it's
2530 // destructed.
2531 PeerRef peer = RemovePeer(nodeid);
2532 assert(peer != nullptr);
2533 LOCK(m_peer_mutex);
2534 m_peer_map.erase(nodeid);
2535 }
2536 CNodeState *state = State(nodeid);
2537 assert(state != nullptr);
2538
2539 if (state->fSyncStarted) {
2540 nSyncStarted--;
2541 }
2542
2543 for (const QueuedBlock &entry : state->vBlocksInFlight) {
2544 auto range =
2545 mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
2546 while (range.first != range.second) {
2547 auto [node_id, list_it] = range.first->second;
2548 if (node_id != nodeid) {
2549 range.first++;
2550 } else {
2551 range.first = mapBlocksInFlight.erase(range.first);
2552 }
2553 }
2554 }
2555 m_mempool.withOrphanage([nodeid](TxOrphanage &orphanage) {
2556 orphanage.EraseForPeer(nodeid);
2557 });
2558 m_txrequest.DisconnectedPeer(nodeid);
2559 m_num_preferred_download_peers -= state->fPreferredDownload;
2560 m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
2561 assert(m_peers_downloading_from >= 0);
2562 m_outbound_peers_with_protect_from_disconnect -=
2563 state->m_chain_sync.m_protect;
2564 assert(m_outbound_peers_with_protect_from_disconnect >= 0);
2565
2566 m_node_states.erase(nodeid);
2567
2568 if (m_node_states.empty()) {
2569 // Do a consistency check after the last peer is removed.
2570 assert(mapBlocksInFlight.empty());
2571 assert(m_num_preferred_download_peers == 0);
2572 assert(m_peers_downloading_from == 0);
2573 assert(m_outbound_peers_with_protect_from_disconnect == 0);
2574 assert(m_txrequest.Size() == 0);
2575 assert(m_mempool.withOrphanage([](const TxOrphanage &orphanage) {
2576 return orphanage.Size();
2577 }) == 0);
2578 }
2579 }
2580
2581 if (node.fSuccessfullyConnected && !node.IsBlockOnlyConn() &&
2582 !node.IsInboundConn()) {
2583 // Only change visible addrman state for full outbound peers. We don't
2584 // call Connected() for feeler connections since they don't have
2585 // fSuccessfullyConnected set.
2586 m_addrman.Connected(node.addr);
2587 }
2588 {
2589 LOCK(m_headers_presync_mutex);
2590 m_headers_presync_stats.erase(nodeid);
2591 }
2592
2593 WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
2594
2595 LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
2596}
2597
2598PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
2599 LOCK(m_peer_mutex);
2600 auto it = m_peer_map.find(id);
2601 return it != m_peer_map.end() ? it->second : nullptr;
2602}
2603
2604PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
2605 PeerRef ret;
2606 LOCK(m_peer_mutex);
2607 auto it = m_peer_map.find(id);
2608 if (it != m_peer_map.end()) {
2609 ret = std::move(it->second);
2610 m_peer_map.erase(it);
2611 }
2612 return ret;
2613}
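// Note the ownership model: RemovePeer() hands the map's reference back to
// the caller, so erasing from m_peer_map never destroys a Peer that another
// thread still references. Assuming PeerRef behaves like a
// std::shared_ptr<Peer> handle (as the refcount comment in FinalizeNode
// implies), usage looks like:
//
//     PeerRef peer = RemovePeer(nodeid); // map entry gone, object alive
//     assert(peer != nullptr);
//     // Safe to read *peer here; the Peer is destroyed only when the last
//     // outstanding PeerRef (ours or another thread's) is released.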
2614
2615bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
2616 CNodeStateStats &stats) const {
2617 {
2618 LOCK(cs_main);
2619 const CNodeState *state = State(nodeid);
2620 if (state == nullptr) {
2621 return false;
2622 }
2623 stats.nSyncHeight = state->pindexBestKnownBlock
2624 ? state->pindexBestKnownBlock->nHeight
2625 : -1;
2626 stats.nCommonHeight = state->pindexLastCommonBlock
2627 ? state->pindexLastCommonBlock->nHeight
2628 : -1;
2629 for (const QueuedBlock &queue : state->vBlocksInFlight) {
2630 if (queue.pindex) {
2631 stats.vHeightInFlight.push_back(queue.pindex->nHeight);
2632 }
2633 }
2634 }
2635
2636 PeerRef peer = GetPeerRef(nodeid);
2637 if (peer == nullptr) {
2638 return false;
2639 }
2640 stats.their_services = peer->m_their_services;
2641 stats.m_starting_height = peer->m_starting_height;
2642 // It is common for nodes with good ping times to suddenly become lagged,
2643 // due to a new block arriving or other large transfer.
2644 // Merely reporting pingtime might fool the caller into thinking the node
2645 // was still responsive, since pingtime does not update until the ping is
2646 // complete, which might take a while. So, if a ping is taking an unusually
2647 // long time in flight, the caller can immediately detect that this is
2648 // happening.
2649 auto ping_wait{0us};
2650 if ((0 != peer->m_ping_nonce_sent) &&
2651 (0 != peer->m_ping_start.load().count())) {
2652 ping_wait =
2653 GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
2654 }
2655
2656 if (auto tx_relay = peer->GetTxRelay()) {
2657 stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex,
2658 return tx_relay->m_relay_txs);
2659 stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
2660 } else {
2661 stats.m_relay_txs = false;
2663 }
2664
2665 stats.m_ping_wait = ping_wait;
2666 stats.m_addr_processed = peer->m_addr_processed.load();
2667 stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
2668 stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
2669 {
2670 LOCK(peer->m_headers_sync_mutex);
2671 if (peer->m_headers_sync) {
2672 stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
2673 }
2674 }
2675
2676 return true;
2677}
2678
2679void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
2680 if (m_opts.max_extra_txs <= 0) {
2681 return;
2682 }
2683
2684 if (!vExtraTxnForCompact.size()) {
2685 vExtraTxnForCompact.resize(m_opts.max_extra_txs);
2686 }
2687
2688 vExtraTxnForCompact[vExtraTxnForCompactIt] =
2689 std::make_pair(tx->GetHash(), tx);
2690 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
2691}
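// vExtraTxnForCompact is managed as a fixed-capacity ring buffer: the write
// index wraps modulo max_extra_txs, so once full, each new entry evicts the
// oldest one. A generic sketch of the same structure (RingBuffer is
// illustrative, not part of this file; assumes capacity > 0):
//
//     #include <utility>
//     #include <vector>
//     template <typename T> class RingBuffer {
//         std::vector<T> m_buf;
//         size_t m_next{0};
//
//     public:
//         explicit RingBuffer(size_t capacity) : m_buf(capacity) {}
//         void Push(T value) {
//             m_buf[m_next] = std::move(value); // overwrite oldest slot
//             m_next = (m_next + 1) % m_buf.size();
//         }
//     };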
2692
2693void PeerManagerImpl::Misbehaving(Peer &peer, const std::string &message) {
2694 LOCK(peer.m_misbehavior_mutex);
2695
2696 const std::string message_prefixed =
2697 message.empty() ? "" : (": " + message);
2698 peer.m_should_discourage = true;
2699 LogPrint(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id,
2700 message_prefixed);
2701}
2702
2703void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
2704 const BlockValidationState &state,
2705 bool via_compact_block,
2706 const std::string &message) {
2707 PeerRef peer{GetPeerRef(nodeid)};
2708 switch (state.GetResult()) {
2709 case BlockValidationResult::BLOCK_RESULT_UNSET:
2710 break;
2711 case BlockValidationResult::BLOCK_HEADER_LOW_WORK:
2712 // We didn't try to process the block because the header chain may
2713 // have too little work.
2714 break;
2715 // The node is providing invalid data:
2716 case BlockValidationResult::BLOCK_CONSENSUS:
2717 case BlockValidationResult::BLOCK_MUTATED:
2718 if (!via_compact_block) {
2719 if (peer) {
2720 Misbehaving(*peer, message);
2721 }
2722 return;
2723 }
2724 break;
2725 case BlockValidationResult::BLOCK_CACHED_INVALID: {
2726 LOCK(cs_main);
2727 CNodeState *node_state = State(nodeid);
2728 if (node_state == nullptr) {
2729 break;
2730 }
2731
2732 // Ban outbound (but not inbound) peers if on an invalid chain.
2733 // Exempt HB compact block peers. Manual connections are always
2734 // protected from discouragement.
2735 if (!via_compact_block && !node_state->m_is_inbound) {
2736 if (peer) {
2737 Misbehaving(*peer, message);
2738 }
2739 return;
2740 }
2741 break;
2742 }
2743 case BlockValidationResult::BLOCK_INVALID_HEADER:
2744 case BlockValidationResult::BLOCK_CHECKPOINT:
2745 case BlockValidationResult::BLOCK_INVALID_PREV:
2746 if (peer) {
2747 Misbehaving(*peer, message);
2748 }
2749 return;
2750 // Conflicting (but not necessarily invalid) data or different policy:
2751 case BlockValidationResult::BLOCK_MISSING_PREV:
2752 if (peer) {
2753 Misbehaving(*peer, message);
2754 }
2755 return;
2756 case BlockValidationResult::BLOCK_TIME_FUTURE:
2757 break;
2758 }
2759 if (message != "") {
2760 LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
2761 }
2762}
2763
2764void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
2765 const TxValidationState &state,
2766 const std::string &message) {
2767 PeerRef peer{GetPeerRef(nodeid)};
2768 switch (state.GetResult()) {
2769 case TxValidationResult::TX_RESULT_UNSET:
2770 break;
2771 // The node is providing invalid data:
2772 case TxValidationResult::TX_CONSENSUS:
2773 if (peer) {
2774 Misbehaving(*peer, message);
2775 }
2776 return;
2777 // Conflicting (but not necessarily invalid) data or different policy:
2778 case TxValidationResult::TX_INPUTS_NOT_STANDARD:
2779 case TxValidationResult::TX_NOT_STANDARD:
2780 case TxValidationResult::TX_MISSING_INPUTS:
2781 case TxValidationResult::TX_PREMATURE_SPEND:
2782 case TxValidationResult::TX_DUPLICATE:
2783 case TxValidationResult::TX_CONFLICT:
2784 case TxValidationResult::TX_CHILD_BEFORE_PARENT:
2785 case TxValidationResult::TX_MEMPOOL_POLICY:
2786 case TxValidationResult::TX_NO_MEMPOOL:
2787 case TxValidationResult::TX_PACKAGE_RECONSIDERABLE:
2788 case TxValidationResult::TX_AVALANCHE_RECONSIDERABLE:
2789 case TxValidationResult::TX_UNKNOWN:
2790 break;
2791 }
2792 if (message != "") {
2793 LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
2794 }
2795}
2796
2797 bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex *pindex) {
2798 AssertLockHeld(cs_main);
2799 if (m_chainman.ActiveChain().Contains(pindex)) {
2800 return true;
2801 }
2802 return pindex->IsValid(BlockValidity::SCRIPTS) &&
2803 (m_chainman.m_best_header != nullptr) &&
2804 (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() <
2805 STALE_RELAY_AGE_LIMIT) &&
2806 (GetBlockProofEquivalentTime(
2807 *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
2808 m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
2809}
2810
2811std::optional<std::string>
2812PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
2813 const CBlockIndex &block_index) {
2814 if (m_chainman.m_blockman.LoadingBlocks()) {
2815 return "Loading blocks ...";
2816 }
2817
2818 LOCK(cs_main);
2819
2820 // Ensure this peer exists and hasn't been disconnected
2821 CNodeState *state = State(peer_id);
2822 if (state == nullptr) {
2823 return "Peer does not exist";
2824 }
2825
2826 // Forget about all prior requests
2827 RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
2828
2829 // Mark block as in-flight
2830 if (!BlockRequested(config, peer_id, block_index)) {
2831 return "Already requested from this peer";
2832 }
2833
2834 // Construct message to request the block
2835 const BlockHash &hash{block_index.GetBlockHash()};
2836 const std::vector<CInv> invs{CInv(MSG_BLOCK, hash)};
2837
2838 // Send block request message to the peer
2839 if (!m_connman.ForNode(peer_id, [this, &invs](CNode *node) {
2840 const CNetMsgMaker msgMaker(node->GetCommonVersion());
2841 this->m_connman.PushMessage(
2842 node, msgMaker.Make(NetMsgType::GETDATA, invs));
2843 return true;
2844 })) {
2845 return "Node not fully connected";
2846 }
2847
2848 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", hash.ToString(),
2849 peer_id);
2850 return std::nullopt;
2851}
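// A caller (for instance an RPC handler) might drive FetchBlock() like
// this; the surrounding names are illustrative:
//
//     if (const auto err = peerman.FetchBlock(config, peer_id, *pindex)) {
//         // Request was not sent; *err says why (peer gone, block already
//         // requested from it, still loading blocks, ...).
//         LogPrintf("fetch failed: %s\n", *err);
//     } else {
//         // A GETDATA for the block is now queued to that peer.
//     }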
2852
2853std::unique_ptr<PeerManager>
2854PeerManager::make(CConnman &connman, AddrMan &addrman, BanMan *banman,
2855 ChainstateManager &chainman, CTxMemPool &pool,
2856 avalanche::Processor *const avalanche, Options opts) {
2857 return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
2858 pool, avalanche, opts);
2859}
2860
2861PeerManagerImpl::PeerManagerImpl(CConnman &connman, AddrMan &addrman,
2862 BanMan *banman, ChainstateManager &chainman,
2863 CTxMemPool &pool,
2864 avalanche::Processor *const avalanche,
2865 Options opts)
2866 : m_rng{opts.deterministic_rng},
2867 m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE_PER_KB}, m_rng},
2868 m_chainparams(chainman.GetParams()), m_connman(connman),
2869 m_addrman(addrman), m_banman(banman), m_chainman(chainman),
2870 m_mempool(pool), m_avalanche(avalanche), m_opts{opts} {}
2871
2872void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
2873 // Stale tip checking and peer eviction are on two different timers, but we
2874 // don't want them to get out of sync due to drift in the scheduler, so we
2875 // combine them in one function and schedule at the quicker (peer-eviction)
2876 // timer.
2877 static_assert(
2878 EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL,
2879 "peer eviction timer should be less than stale tip check timer");
2880 scheduler.scheduleEvery(
2881 [this]() {
2882 this->CheckForStaleTipAndEvictPeers();
2883 return true;
2884 },
2885 std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
2886
2887 // schedule next run for 10-15 minutes in the future
2888 const auto reattemptBroadcastInterval =
2889 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
2890 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2891 reattemptBroadcastInterval);
2892
2893 // Update the avalanche statistics on a schedule
2894 scheduler.scheduleEvery(
2895 [this]() {
2896 UpdateAvalancheStatistics();
2897 return true;
2898 },
2899 AVALANCHE_STATISTICS_REFRESH_PERIOD);
2900
2901 // schedule next run for 2-5 minutes in the future
2902 const auto avalanchePeriodicNetworkingInterval =
2903 2min + FastRandomContext().randrange<std::chrono::milliseconds>(3min);
2904 scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2905 avalanchePeriodicNetworkingInterval);
2906}
2907
2914void PeerManagerImpl::BlockConnected(
2915 ChainstateRole role, const std::shared_ptr<const CBlock> &pblock,
2916 const CBlockIndex *pindex) {
2917 // Update this for all chainstate roles so that we don't mistakenly see
2918 // peers helping us do background IBD as having a stale tip.
2919 m_last_tip_update = GetTime<std::chrono::seconds>();
2920
2921 // In case the dynamic timeout was doubled once or more, reduce it slowly
2922 // back to its default value
2923 auto stalling_timeout = m_block_stalling_timeout.load();
2924 Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
2925 if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
2926 const auto new_timeout =
2927 std::max(std::chrono::duration_cast<std::chrono::seconds>(
2928 stalling_timeout * 0.85),
2929 BLOCK_STALLING_TIMEOUT_DEFAULT);
2930 if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,
2931 new_timeout)) {
2932 LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n",
2933 count_seconds(new_timeout));
2934 }
2935 }
2936
2937 // The following tasks can be skipped since we don't maintain a mempool for
2938 // the ibd/background chainstate.
2939 if (role == ChainstateRole::BACKGROUND) {
2940 return;
2941 }
2942 m_mempool.withOrphanage([&pblock](TxOrphanage &orphanage) {
2943 orphanage.EraseForBlock(*pblock);
2944 });
2945 m_mempool.withConflicting([&pblock](TxConflicting &conflicting) {
2946 conflicting.EraseForBlock(*pblock);
2947 });
2948
2949 {
2950 LOCK(m_recent_confirmed_transactions_mutex);
2951 for (const CTransactionRef &ptx : pblock->vtx) {
2952 m_recent_confirmed_transactions.insert(ptx->GetId());
2953 }
2954 }
2955 {
2956 LOCK(cs_main);
2957 for (const auto &ptx : pblock->vtx) {
2958 m_txrequest.ForgetInvId(ptx->GetId());
2959 }
2960 }
2961}
2962
2963void PeerManagerImpl::BlockDisconnected(
2964 const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
2965 // To avoid relay problems with transactions that were previously
2966 // confirmed, clear our filter of recently confirmed transactions whenever
2967 // there's a reorg.
2968 // This means that in a 1-block reorg (where 1 block is disconnected and
2969 // then another block reconnected), our filter will drop to having only one
2970 // block's worth of transactions in it, but that should be fine, since
2971 // presumably the most common case of relaying a confirmed transaction
2972 // should be just after a new block containing it is found.
2973 LOCK(m_recent_confirmed_transactions_mutex);
2974 m_recent_confirmed_transactions.reset();
2975}
2976
2981void PeerManagerImpl::NewPoWValidBlock(
2982 const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
2983 std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
2984 std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
2985 const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
2986
2987 LOCK(cs_main);
2988
2989 if (pindex->nHeight <= m_highest_fast_announce) {
2990 return;
2991 }
2992 m_highest_fast_announce = pindex->nHeight;
2993
2994 BlockHash hashBlock(pblock->GetHash());
2995 const std::shared_future<CSerializedNetMsg> lazy_ser{
2996 std::async(std::launch::deferred, [&] {
2997 return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock);
2998 })};
2999
3000 {
3001 auto most_recent_block_txs =
3002 std::make_unique<std::map<TxId, CTransactionRef>>();
3003 for (const auto &tx : pblock->vtx) {
3004 most_recent_block_txs->emplace(tx->GetId(), tx);
3005 }
3006
3007 LOCK(m_most_recent_block_mutex);
3008 m_most_recent_block_hash = hashBlock;
3009 m_most_recent_block = pblock;
3010 m_most_recent_compact_block = pcmpctblock;
3011 m_most_recent_block_txs = std::move(most_recent_block_txs);
3012 }
3013
3014 m_connman.ForEachNode(
3015 [this, pindex, &lazy_ser, &hashBlock](CNode *pnode)
3016 EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
3017 AssertLockHeld(::cs_main);
3018
3019 if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION ||
3020 pnode->fDisconnect) {
3021 return;
3022 }
3023 ProcessBlockAvailability(pnode->GetId());
3024 CNodeState &state = *State(pnode->GetId());
3025 // If the peer has, or we announced to them the previous block
3026 // already, but we don't think they have this one, go ahead and
3027 // announce it.
3028 if (state.m_requested_hb_cmpctblocks &&
3029 !PeerHasHeader(&state, pindex) &&
3030 PeerHasHeader(&state, pindex->pprev)) {
3031 LogPrint(BCLog::NET,
3032 "%s sending header-and-ids %s to peer=%d\n",
3033 "PeerManager::NewPoWValidBlock",
3034 hashBlock.ToString(), pnode->GetId());
3035
3036 const CSerializedNetMsg &ser_cmpctblock{lazy_ser.get()};
3037 m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
3038 state.pindexBestHeaderSent = pindex;
3039 }
3040 });
3041}
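// lazy_ser above is the serialize-once-on-demand pattern:
// std::async(std::launch::deferred, ...) performs no work until the first
// get(), and std::shared_future lets every ForEachNode iteration reuse that
// single result. A self-contained sketch (Expensive() and Use() are
// hypothetical):
//
//     #include <future>
//     #include <string>
//     std::shared_future<std::string> lazy{
//         std::async(std::launch::deferred, [] { return Expensive(); })};
//     for (bool wanted : {false, true, true}) {
//         if (wanted) {
//             Use(lazy.get()); // Expensive() runs at most once
//         }
//     }
//
// If no peer qualifies for the fast compact-block push, the message is
// never serialized at all.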
3042
3047void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
3048 const CBlockIndex *pindexFork,
3049 bool fInitialDownload) {
3050 SetBestHeight(pindexNew->nHeight);
3051 SetServiceFlagsIBDCache(!fInitialDownload);
3052
3053 // Don't relay inventory during initial block download.
3054 if (fInitialDownload) {
3055 return;
3056 }
3057
3058 // Find the hashes of all blocks that weren't previously in the best chain.
3059 std::vector<BlockHash> vHashes;
3060 const CBlockIndex *pindexToAnnounce = pindexNew;
3061 while (pindexToAnnounce != pindexFork) {
3062 vHashes.push_back(pindexToAnnounce->GetBlockHash());
3063 pindexToAnnounce = pindexToAnnounce->pprev;
3064 if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
3065 // Limit announcements in case of a huge reorganization. Rely on the
3066 // peer's synchronization mechanism in that case.
3067 break;
3068 }
3069 }
3070
3071 {
3072 LOCK(m_peer_mutex);
3073 for (auto &it : m_peer_map) {
3074 Peer &peer = *it.second;
3075 LOCK(peer.m_block_inv_mutex);
3076 for (const BlockHash &hash : reverse_iterate(vHashes)) {
3077 peer.m_blocks_for_headers_relay.push_back(hash);
3078 }
3079 }
3080 }
3081
3082 m_connman.WakeMessageHandler();
3083}
3084
3089void PeerManagerImpl::BlockChecked(const CBlock &block,
3090 const BlockValidationState &state) {
3091 LOCK(cs_main);
3092
3093 const BlockHash hash = block.GetHash();
3094 std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
3095 mapBlockSource.find(hash);
3096
3097 // If the block failed validation, we know where it came from and, if
3098 // we're still connected to that peer, we may punish it.
3099 if (state.IsInvalid() && it != mapBlockSource.end() &&
3100 State(it->second.first)) {
3101 MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
3102 /*via_compact_block=*/!it->second.second);
3103 }
3104 // Check that:
3105 // 1. The block is valid
3106 // 2. We're not in initial block download
3107 // 3. This is currently the best block we're aware of. We haven't updated
3108 // the tip yet so we have no way to check this directly here. Instead we
3109 // just check that there are currently no other blocks in flight.
3110 else if (state.IsValid() && !m_chainman.IsInitialBlockDownload() &&
3111 mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
3112 if (it != mapBlockSource.end()) {
3113 MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
3114 }
3115 }
3116
3117 if (it != mapBlockSource.end()) {
3118 mapBlockSource.erase(it);
3119 }
3120}
3121
3123//
3124// Messages
3125//
3126
3127bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid,
3128 bool include_reconsiderable) {
3129 if (m_chainman.ActiveChain().Tip()->GetBlockHash() !=
3130 hashRecentRejectsChainTip) {
3131 // If the chain tip has changed previously rejected transactions
3132 // might be now valid, e.g. due to a nLockTime'd tx becoming
3133 // valid, or a double-spend. Reset the rejects filter and give
3134 // those txs a second chance.
3135 hashRecentRejectsChainTip =
3136 m_chainman.ActiveChain().Tip()->GetBlockHash();
3137 m_recent_rejects.reset();
3138 m_recent_rejects_package_reconsiderable.reset();
3139 }
3140
3141 if (m_mempool.withOrphanage([&txid](const TxOrphanage &orphanage) {
3142 return orphanage.HaveTx(txid);
3143 })) {
3144 return true;
3145 }
3146
3147 if (m_mempool.withConflicting([&txid](const TxConflicting &conflicting) {
3148 return conflicting.HaveTx(txid);
3149 })) {
3150 return true;
3151 }
3152
3153 if (include_reconsiderable &&
3154 m_recent_rejects_package_reconsiderable.contains(txid)) {
3155 return true;
3156 }
3157
3158 {
3159 LOCK(m_recent_confirmed_transactions_mutex);
3160 if (m_recent_confirmed_transactions.contains(txid)) {
3161 return true;
3162 }
3163 }
3164
3165 return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
3166}
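// AlreadyHaveTx() consults its sources roughly cheapest-first, and each
// layer short-circuits the rest: orphanage, conflicting pool, optionally
// the package-reconsiderable reject filter, the recently-confirmed filter,
// then recent-rejects and the mempool itself (a tip change first resets
// the two reject filters). Condensed, the decision is (helper names
// illustrative):
//
//     return InOrphanage(txid) || InConflicting(txid) ||
//            (include_reconsiderable && InReconsiderableRejects(txid)) ||
//            RecentlyConfirmed(txid) || RecentlyRejected(txid) ||
//            InMempool(txid);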
3167
3168bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {
3169 return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
3170}
3171
3172bool PeerManagerImpl::AlreadyHaveProof(const avalanche::ProofId &proofid) {
3173 if (!Assume(m_avalanche)) {
3174 return false;
3175 }
3176
3177 auto localProof = m_avalanche->getLocalProof();
3178 if (localProof && localProof->getId() == proofid) {
3179 return true;
3180 }
3181
3182 return m_avalanche->withPeerManager([&proofid](avalanche::PeerManager &pm) {
3183 return pm.exists(proofid) || pm.isInvalid(proofid);
3184 });
3185}
3186
3187void PeerManagerImpl::SendPings() {
3188 LOCK(m_peer_mutex);
3189 for (auto &it : m_peer_map) {
3190 it.second->m_ping_queued = true;
3191 }
3192}
3193
3194void PeerManagerImpl::RelayTransaction(const TxId &txid) {
3195 LOCK(m_peer_mutex);
3196 for (auto &it : m_peer_map) {
3197 Peer &peer = *it.second;
3198 auto tx_relay = peer.GetTxRelay();
3199 if (!tx_relay) {
3200 continue;
3201 }
3202 LOCK(tx_relay->m_tx_inventory_mutex);
3203 // Only queue transactions for announcement once the version handshake
3204 // is completed. The time of arrival for these transactions is
3205 // otherwise at risk of leaking to a spy, if the spy is able to
3206 // distinguish transactions received during the handshake from the rest
3207 // in the announcement.
3208 if (tx_relay->m_next_inv_send_time == 0s) {
3209 continue;
3210 }
3211
3212 if (!tx_relay->m_tx_inventory_known_filter.contains(txid) ||
3213 tx_relay->m_avalanche_stalled_txids.count(txid) > 0) {
3214 tx_relay->m_tx_inventory_to_send.insert(txid);
3215 }
3216 }
3217}
3218
3219void PeerManagerImpl::RelayProof(const avalanche::ProofId &proofid) {
3220 LOCK(m_peer_mutex);
3221 for (auto &it : m_peer_map) {
3222 Peer &peer = *it.second;
3223
3224 if (!peer.m_proof_relay) {
3225 continue;
3226 }
3227 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
3228 if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
3229 proofid)) {
3230 peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
3231 }
3232 }
3233}
3234
3235void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
3236 bool fReachable) {
3237 // We choose the same nodes within a given 24h window (if the list of
3238 // connected nodes does not change) and we don't relay to nodes that already
3239 // know an address. So within 24h we will likely relay a given address once.
3240 // This is to prevent a peer from unjustly giving their address better
3241 // propagation by sending it to us repeatedly.
3242
3243 if (!fReachable && !addr.IsRelayable()) {
3244 return;
3245 }
3246
3247 // Relay to a limited number of other nodes
3248 // Use deterministic randomness to send to the same nodes for 24 hours
3249 // at a time so the m_addr_knowns of the chosen nodes prevent repeats
3250 const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
3251 const auto current_time{GetTime<std::chrono::seconds>()};
3252 // Adding address hash makes exact rotation time different per address,
3253 // while preserving periodicity.
3254 const uint64_t time_addr{
3255 (static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) /
3256 count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
3257
3258 const CSipHasher hasher{
3259 m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
3260 .Write(hash_addr)
3261 .Write(time_addr)};
3262
3263 // Relay reachable addresses to 2 peers. Unreachable addresses are relayed
3264 // randomly to 1 or 2 peers.
3265 unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
3266 std::array<std::pair<uint64_t, Peer *>, 2> best{
3267 {{0, nullptr}, {0, nullptr}}};
3268 assert(nRelayNodes <= best.size());
3269
3270 LOCK(m_peer_mutex);
3271
3272 for (auto &[id, peer] : m_peer_map) {
3273 if (peer->m_addr_relay_enabled && id != originator &&
3274 IsAddrCompatible(*peer, addr)) {
3275 uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
3276 for (unsigned int i = 0; i < nRelayNodes; i++) {
3277 if (hashKey > best[i].first) {
3278 std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
3279 best.begin() + i + 1);
3280 best[i] = std::make_pair(hashKey, peer.get());
3281 break;
3282 }
3283 }
3284 }
3285 };
3286
3287 for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
3288 PushAddress(*best[i].second, addr);
3289 }
3290}
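// Selection above is "highest keyed hash wins": each candidate peer gets a
// deterministic 64-bit score derived from (node secret, hash_addr,
// time_addr, peer id), and the top one or two scores receive the address.
// Because time_addr only changes every 24h (offset per address by
// hash_addr), the winners are stable for a full rotation period. A sketch
// of the top-1 variant (Score() stands in for the CSipHasher chain):
//
//     uint64_t best_score{0};
//     Peer *best_peer{nullptr};
//     for (auto &[id, candidate] : peers) {
//         const uint64_t score = Score(id); // keyed, fixed within a bucket
//         if (score > best_score) {
//             best_score = score;
//             best_peer = candidate.get();
//         }
//     }
//     // best_peer stays the same until the 24h bucket rotates.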
3291
3292void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
3293 Peer &peer, const CInv &inv) {
3294 const BlockHash hash(inv.hash);
3295
3296 std::shared_ptr<const CBlock> a_recent_block;
3297 std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
3298 {
3299 LOCK(m_most_recent_block_mutex);
3300 a_recent_block = m_most_recent_block;
3301 a_recent_compact_block = m_most_recent_compact_block;
3302 }
3303
3304 bool need_activate_chain = false;
3305 {
3306 LOCK(cs_main);
3307 const CBlockIndex *pindex =
3308 m_chainman.m_blockman.LookupBlockIndex(hash);
3309 if (pindex) {
3310 if (pindex->HaveNumChainTxs() &&
3311 !pindex->IsValid(BlockValidity::SCRIPTS) &&
3312 pindex->IsValid(BlockValidity::TREE)) {
3313 // If we have the block and all of its parents, but have not yet
3314 // validated it, we might be in the middle of connecting it (ie
3315 // in the unlock of cs_main before ActivateBestChain but after
3316 // AcceptBlock). In this case, we need to run ActivateBestChain
3317 // prior to checking the relay conditions below.
3318 need_activate_chain = true;
3319 }
3320 }
3321 } // release cs_main before calling ActivateBestChain
3322 if (need_activate_chain) {
3323 BlockValidationState state;
3324 if (!m_chainman.ActiveChainstate().ActivateBestChain(
3325 state, a_recent_block, m_avalanche)) {
3326 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
3327 state.ToString());
3328 }
3329 }
3330
3331 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3332 const CBlockIndex *pindex{nullptr};
3333 const CBlockIndex *tip{nullptr};
3334 bool can_direct_fetch{false};
3335 FlatFilePos block_pos{};
3336 {
3337 LOCK(cs_main);
3338 pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
3339 if (!pindex) {
3340 return;
3341 }
3342 if (!BlockRequestAllowed(pindex)) {
3343 LogPrint(BCLog::NET,
3344 "%s: ignoring request from peer=%i for old "
3345 "block that isn't in the main chain\n",
3346 __func__, pfrom.GetId());
3347 return;
3348 }
3349 // Disconnect node in case we have reached the outbound limit for
3350 // serving historical blocks.
3351 if (m_connman.OutboundTargetReached(true) &&
3352 (((m_chainman.m_best_header != nullptr) &&
3353 (m_chainman.m_best_header->GetBlockTime() -
3354 pindex->GetBlockTime() >
3355 HISTORICAL_BLOCK_AGE) ||
3356 inv.IsMsgFilteredBlk()) &&
3357 // nodes with the download permission may exceed target
3358 !pfrom.HasPermission(NetPermissionFlags::Download))) {
3359 LogPrint(
3360 BCLog::NET,
3361 "historical block serving limit reached, disconnect peer=%d\n",
3362 pfrom.GetId());
3363 pfrom.fDisconnect = true;
3364 return;
3365 }
3366 tip = m_chainman.ActiveChain().Tip();
3367 // Avoid leaking prune-height by never sending blocks below the
3368 // NODE_NETWORK_LIMITED threshold.
3369 // Add two blocks buffer extension for possible races
3370 if (!pfrom.HasPermission(NetPermissionFlags::NoBan) &&
3371 ((((peer.m_our_services & NODE_NETWORK_LIMITED) ==
3372 NODE_NETWORK_LIMITED) &&
3373 ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) &&
3374 (tip->nHeight - pindex->nHeight >
3375 (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
3376 LogPrint(BCLog::NET,
3377 "Ignore block request below NODE_NETWORK_LIMITED "
3378 "threshold, disconnect peer=%d\n",
3379 pfrom.GetId());
3380
3381 // disconnect node and prevent it from stalling (would otherwise
3382 // wait for the missing block)
3383 pfrom.fDisconnect = true;
3384 return;
3385 }
3386 // Pruned nodes may have deleted the block, so check whether it's
3387 // available before trying to send.
3388 if (!pindex->nStatus.hasData()) {
3389 return;
3390 }
3391 can_direct_fetch = CanDirectFetch();
3392 block_pos = pindex->GetBlockPos();
3393 }
3394
3395 std::shared_ptr<const CBlock> pblock;
3396 auto handle_block_read_error = [&]() {
3397 if (WITH_LOCK(m_chainman.GetMutex(),
3398 return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
3399 LogPrint(BCLog::NET,
3400 "Block was pruned before it could be read, disconnect "
3401 "peer=%d\n",
3402 pfrom.GetId());
3403 } else {
3404 LogError("Cannot load block from disk, disconnect peer=%d\n",
3405 pfrom.GetId());
3406 }
3407 pfrom.fDisconnect = true;
3408 };
3409
3410 if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
3411 pblock = a_recent_block;
3412 } else if (!inv.IsMsgCmpctBlk()) {
3413 // Fast-path: in this case it is possible to serve the block directly
3414 // from disk, as the network format matches the format on disk
3415 std::vector<uint8_t> block_data;
3416 if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data,
3417 block_pos)) {
3418 handle_block_read_error();
3419 return;
3420 }
3421 m_connman.PushMessage(
3422 &pfrom, msgMaker.Make(NetMsgType::BLOCK, Span{block_data}));
3423 // Don't set pblock as we've sent the block
3424 } else {
3425 // Send block from disk
3426 std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
3427 if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, block_pos)) {
3428 handle_block_read_error();
3429 return;
3430 }
3431 pblock = pblockRead;
3432 }
3433 if (pblock) {
3434 if (inv.IsMsgBlk()) {
3435 m_connman.PushMessage(&pfrom,
3436 msgMaker.Make(NetMsgType::BLOCK, *pblock));
3437 } else if (inv.IsMsgFilteredBlk()) {
3438 bool sendMerkleBlock = false;
3439 CMerkleBlock merkleBlock;
3440 if (auto tx_relay = peer.GetTxRelay()) {
3441 LOCK(tx_relay->m_bloom_filter_mutex);
3442 if (tx_relay->m_bloom_filter) {
3443 sendMerkleBlock = true;
3444 merkleBlock =
3445 CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
3446 }
3447 }
3448 if (sendMerkleBlock) {
3449 m_connman.PushMessage(
3450 &pfrom,
3451 msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
3452 // CMerkleBlock just contains hashes, so also push any
3453 // transactions in the block the client did not see. This avoids
3454 // hurting performance by pointlessly requiring a round-trip.
3455 // Note that there is currently no way for a node to request any
3456 // single transactions we didn't send here - they must either
3457 // disconnect and retry or request the full block. Thus, the
3458 // protocol spec allows us to provide duplicate txn here;
3459 // however, we MUST always provide at least what the
3460 // remote peer needs.
3461 typedef std::pair<size_t, uint256> PairType;
3462 for (PairType &pair : merkleBlock.vMatchedTxn) {
3463 m_connman.PushMessage(
3464 &pfrom, msgMaker.Make(NetMsgType::TX,
3465 *pblock->vtx[pair.first]));
3466 }
3467 }
3468 // else
3469 // no response
3470 } else if (inv.IsMsgCmpctBlk()) {
3471 // If a peer is asking for old blocks, we're almost guaranteed they
3472 // won't have a useful mempool to match against a compact block, and
3473 // we don't feel like constructing the object for them, so instead
3474 // we respond with the full, non-compact block.
3475 int nSendFlags = 0;
3476 if (can_direct_fetch &&
3477 pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) {
3478 if (a_recent_compact_block &&
3479 a_recent_compact_block->header.GetHash() ==
3480 pindex->GetBlockHash()) {
3481 m_connman.PushMessage(
3482 &pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK,
3483 *a_recent_compact_block));
3484 } else {
3485 CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
3486 m_connman.PushMessage(&pfrom,
3487 msgMaker.Make(nSendFlags,
3488 NetMsgType::CMPCTBLOCK,
3489 cmpctblock));
3490 }
3491 } else {
3492 m_connman.PushMessage(
3493 &pfrom,
3494 msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
3495 }
3496 }
3497 }
3498
3499 {
3500 LOCK(peer.m_block_inv_mutex);
3501 // Trigger the peer node to send a getblocks request for the next
3502 // batch of inventory.
3503 if (hash == peer.m_continuation_block) {
3504 // Send immediately. This must send even if redundant, and
3505 // we want it right after the last block so they don't wait for
3506 // other stuff first.
3507 std::vector<CInv> vInv;
3508 vInv.push_back(CInv(MSG_BLOCK, tip->GetBlockHash()));
3509 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
3510 peer.m_continuation_block = BlockHash();
3511 }
3512 }
3513}
3514
3515 CTransactionRef
3516 PeerManagerImpl::FindTxForGetData(const Peer &peer, const TxId &txid,
3517 const std::chrono::seconds mempool_req,
3518 const std::chrono::seconds now) {
3519 auto txinfo = m_mempool.info(txid);
3520 if (txinfo.tx) {
3521 // If a TX could have been INVed in reply to a MEMPOOL request,
3522 // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
3523 // unconditionally.
3524 if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
3525 txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
3526 return std::move(txinfo.tx);
3527 }
3528 }
3529
3530 {
3531 LOCK(cs_main);
3532
3533 // Otherwise, the transaction might have been announced recently.
3534 bool recent =
3535 Assume(peer.GetTxRelay())->m_recently_announced_invs.contains(txid);
3536 if (recent && txinfo.tx) {
3537 return std::move(txinfo.tx);
3538 }
3539
3540 // Or it might be from the most recent block
3541 {
3542 LOCK(m_most_recent_block_mutex);
3543 if (m_most_recent_block_txs != nullptr) {
3544 auto it = m_most_recent_block_txs->find(txid);
3545 if (it != m_most_recent_block_txs->end()) {
3546 return it->second;
3547 }
3548 }
3549 }
3550 }
3551
3552 return {};
3553}
3554
3557 avalanche::ProofRef
3558 PeerManagerImpl::FindProofForGetData(const Peer &peer,
3559 const avalanche::ProofId &proofid,
3560 const std::chrono::seconds now) {
3561 avalanche::ProofRef proof;
3562
3563 bool send_unconditionally =
3564 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
3565 return pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
3566 proof = peer.proof;
3567
3568 // If we know that proof for long enough, allow for requesting
3569 // it.
3570 return peer.registration_time <=
3571 now - UNCONDITIONAL_RELAY_DELAY;
3572 });
3573 });
3574
3575 if (!proof) {
3576 // Always send our local proof if it gets requested, assuming it's
3577 // valid. This will make it easier to bind with peers upon startup, when
3578 // the status of our proof is still unknown, pending a block. Note that it
3579 // still needs to have been announced first (presumably via an avahello
3580 // message).
3581 proof = m_avalanche->getLocalProof();
3582 }
3583
3584 // We don't have this proof
3585 if (!proof) {
3586 return avalanche::ProofRef();
3587 }
3588
3589 if (send_unconditionally) {
3590 return proof;
3591 }
3592
3593 // Otherwise, the proof must have been announced recently.
3594 if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {
3595 return proof;
3596 }
3597
3598 return avalanche::ProofRef();
3599}
3600
3601void PeerManagerImpl::ProcessGetData(
3602 const Config &config, CNode &pfrom, Peer &peer,
3603 const std::atomic<bool> &interruptMsgProc) {
3604 AssertLockNotHeld(cs_main);
3605
3606 auto tx_relay = peer.GetTxRelay();
3607
3608 std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
3609 std::vector<CInv> vNotFound;
3610 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3611
3612 const auto now{GetTime<std::chrono::seconds>()};
3613 // Get last mempool request time
3614 const auto mempool_req = tx_relay != nullptr
3615 ? tx_relay->m_last_mempool_req.load()
3616 : std::chrono::seconds::min();
3617
3618 // Process as many TX or AVA_PROOF items from the front of the getdata
3619 // queue as possible, since they're common and it's efficient to batch
3620 // process them.
3621 while (it != peer.m_getdata_requests.end()) {
3622 if (interruptMsgProc) {
3623 return;
3624 }
3625 // The send buffer provides backpressure. If there's no space in
3626 // the buffer, pause processing until the next call.
3627 if (pfrom.fPauseSend) {
3628 break;
3629 }
3630
3631 const CInv &inv = *it;
3632
3633 if (it->IsMsgProof()) {
3634 if (!m_avalanche) {
3635 vNotFound.push_back(inv);
3636 ++it;
3637 continue;
3638 }
3639 const avalanche::ProofId proofid(inv.hash);
3640 auto proof = FindProofForGetData(peer, proofid, now);
3641 if (proof) {
3642 m_connman.PushMessage(
3643 &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
3644 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
3645 pm.removeUnbroadcastProof(proofid);
3646 });
3647 } else {
3648 vNotFound.push_back(inv);
3649 }
3650
3651 ++it;
3652 continue;
3653 }
3654
3655 if (it->IsMsgTx()) {
3656 if (tx_relay == nullptr) {
3657 // Ignore GETDATA requests for transactions from
3658 // block-relay-only peers and peers that asked us not to
3659 // announce transactions.
3660 ++it; continue;
3661 }
3662
3663 const TxId txid(inv.hash);
3664 CTransactionRef tx = FindTxForGetData(peer, txid, mempool_req, now);
3665 if (tx) {
3666 int nSendFlags = 0;
3667 m_connman.PushMessage(
3668 &pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
3669 m_mempool.RemoveUnbroadcastTx(txid);
3670 // As we're going to send tx, make sure its unconfirmed parents
3671 // are made requestable.
3672 std::vector<TxId> parent_ids_to_add;
3673 {
3674 LOCK(m_mempool.cs);
3675 auto txiter = m_mempool.GetIter(tx->GetId());
3676 if (txiter) {
3677 auto &pentry = *txiter;
3678 const CTxMemPoolEntry::Parents &parents =
3679 (*pentry)->GetMemPoolParentsConst();
3680 parent_ids_to_add.reserve(parents.size());
3681 for (const auto &parent : parents) {
3682 if (parent.get()->GetTime() >
3683 now - UNCONDITIONAL_RELAY_DELAY) {
3684 parent_ids_to_add.push_back(
3685 parent.get()->GetTx().GetId());
3686 }
3687 }
3688 }
3689 }
3690 for (const TxId &parent_txid : parent_ids_to_add) {
3691 // Relaying a transaction with a recent but unconfirmed
3692 // parent.
3693 if (WITH_LOCK(tx_relay->m_tx_inventory_mutex,
3694 return !tx_relay->m_tx_inventory_known_filter
3695 .contains(parent_txid))) {
3696 tx_relay->m_recently_announced_invs.insert(parent_txid);
3697 }
3698 }
3699 } else {
3700 vNotFound.push_back(inv);
3701 }
3702
3703 ++it;
3704 continue;
3705 }
3706
3707 // It's neither a proof nor a transaction
3708 break;
3709 }
3710
3711 // Only process one BLOCK item per call, since they're uncommon and can be
3712 // expensive to process.
3713 if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
3714 const CInv &inv = *it++;
3715 if (inv.IsGenBlkMsg()) {
3716 ProcessGetBlockData(config, pfrom, peer, inv);
3717 }
3718 // else: If the first item on the queue is an unknown type, we erase it
3719 // and continue processing the queue on the next call.
3720 }
3721
3722 peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
3723
3724 if (!vNotFound.empty()) {
3725 // Let the peer know that we didn't find what it asked for, so it
3726 // doesn't have to wait around forever. SPV clients care about this
3727 // message: it's needed when they are recursively walking the
3728 // dependencies of relevant unconfirmed transactions. SPV clients want
3729 // to do that because they want to know about (and store and rebroadcast
3730 // and risk analyze) the dependencies of transactions relevant to them,
3731 // without having to download the entire memory pool. Also, other nodes
3732 // can use these messages to automatically request a transaction from
3733 // some other peer that announced it, and stop waiting for us to
3734 // respond. In normal operation, we often send NOTFOUND messages for
3735 // parents of transactions that we relay; if a peer is missing a parent,
3736 // they may assume we have them and request the parents from us.
3737 m_connman.PushMessage(&pfrom,
3738 msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
3739 }
3740}
3741
3742void PeerManagerImpl::SendBlockTransactions(
3743 CNode &pfrom, Peer &peer, const CBlock &block,
3744 const BlockTransactionsRequest &req) {
3745 BlockTransactions resp(req);
3746 for (size_t i = 0; i < req.indices.size(); i++) {
3747 if (req.indices[i] >= block.vtx.size()) {
3748 Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
3749 return;
3750 }
3751 resp.txn[i] = block.vtx[req.indices[i]];
3752 }
3753 LOCK(cs_main);
3754 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3755 int nSendFlags = 0;
3756 m_connman.PushMessage(
3757 &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
3758}
3759
3760bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
3761 const Consensus::Params &consensusParams,
3762 Peer &peer) {
3763 // Do these headers have proof-of-work matching what's claimed?
3764 if (!HasValidProofOfWork(headers, consensusParams)) {
3765 Misbehaving(peer, "header with invalid proof of work");
3766 return false;
3767 }
3768
3769 // Are these headers connected to each other?
3770 if (!CheckHeadersAreContinuous(headers)) {
3771 Misbehaving(peer, "non-continuous headers sequence");
3772 return false;
3773 }
3774 return true;
3775}
3776
3777arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() {
3778 arith_uint256 near_chaintip_work = 0;
3779 LOCK(cs_main);
3780 if (m_chainman.ActiveChain().Tip() != nullptr) {
3781 const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
3782 // Use a 144 block buffer, so that we'll accept headers that fork from
3783 // near our tip.
3784 near_chaintip_work =
3785 tip->nChainWork -
3786 std::min<arith_uint256>(144 * GetBlockProof(*tip), tip->nChainWork);
3787 }
3788 return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
3789}
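// Worked example for the threshold above: with tip chain work W and
// per-block proof p, the returned value is
//
//     max(W - min(144 * p, W), MinimumChainWork())
//
// The inner min() clamps the subtraction at zero (arith_uint256 is
// unsigned), so on a young chain with W < 144 * p the near-tip term is 0
// and MinimumChainWork() alone applies; otherwise headers forking within
// roughly 144 blocks (about a day) of the tip stay above the threshold.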
3790
3797void PeerManagerImpl::HandleUnconnectingHeaders(
3798 CNode &pfrom, Peer &peer, const std::vector<CBlockHeader> &headers) {
3799 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3800
3801 // Try to fill in the missing headers.
3802 const CBlockIndex *best_header{
3803 WITH_LOCK(cs_main, return m_chainman.m_best_header)};
3804 if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
3805 LogPrint(
3806 BCLog::NET,
3807 "received header %s: missing prev block %s, sending getheaders "
3808 "(%d) to end (peer=%d)\n",
3809 headers[0].GetHash().ToString(),
3810 headers[0].hashPrevBlock.ToString(), best_header->nHeight,
3811 pfrom.GetId());
3812 }
3813
3814 // Set hashLastUnknownBlock for this peer, so that if we
3815 // eventually get the headers - even from a different peer -
3816 // we can use this peer to download.
3817 WITH_LOCK(cs_main,
3818 UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
3819}
3820
3821bool PeerManagerImpl::CheckHeadersAreContinuous(
3822 const std::vector<CBlockHeader> &headers) const {
3823 BlockHash hashLastBlock;
3824 for (const CBlockHeader &header : headers) {
3825 if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
3826 return false;
3827 }
3828 hashLastBlock = header.GetHash();
3829 }
3830 return true;
3831}
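// CheckHeadersAreContinuous() only validates the hashPrevBlock links inside
// the message itself; whether the first header connects to our block tree
// is checked separately via LookupBlockIndex. For example (illustrative,
// called on the PeerManagerImpl instance):
//
//     std::vector<CBlockHeader> headers(2);
//     headers[1].hashPrevBlock = headers[0].GetHash();
//     CheckHeadersAreContinuous(headers); // true: properly chained
//     headers[1].hashPrevBlock = BlockHash{};
//     CheckHeadersAreContinuous(headers); // false: broken link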
3832
3833bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
3834 Peer &peer, CNode &pfrom, std::vector<CBlockHeader> &headers) {
3835 if (peer.m_headers_sync) {
3836 auto result = peer.m_headers_sync->ProcessNextHeaders(
3837 headers, headers.size() == MAX_HEADERS_RESULTS);
3838 // If it is a valid continuation, we should treat the existing
3839 // getheaders request as responded to.
3840 if (result.success) {
3841 peer.m_last_getheaders_timestamp = {};
3842 }
3843 if (result.request_more) {
3844 auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
3845 // If we were instructed to ask for a locator, it should not be
3846 // empty.
3847 Assume(!locator.vHave.empty());
3848 // We can only be instructed to request more if processing was
3849 // successful.
3850 Assume(result.success);
3851 if (!locator.vHave.empty()) {
3852 // It should be impossible for the getheaders request to fail,
3853 // because we just cleared the last getheaders timestamp.
3854 bool sent_getheaders =
3855 MaybeSendGetHeaders(pfrom, locator, peer);
3856 Assume(sent_getheaders);
3857 LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
3858 locator.vHave.front().ToString(), pfrom.GetId());
3859 }
3860 }
3861
3862 if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
3863 peer.m_headers_sync.reset(nullptr);
3864
3865 // Delete this peer's entry in m_headers_presync_stats.
3866 // If this is m_headers_presync_bestpeer, it will be replaced later
3867 // by the next peer that triggers the else{} branch below.
3868 LOCK(m_headers_presync_mutex);
3869 m_headers_presync_stats.erase(pfrom.GetId());
3870 } else {
3871 // Build statistics for this peer's sync.
3872 HeadersPresyncStats stats;
3873 stats.first = peer.m_headers_sync->GetPresyncWork();
3874 if (peer.m_headers_sync->GetState() ==
3875 HeadersSyncState::State::PRESYNC) {
3876 stats.second = {peer.m_headers_sync->GetPresyncHeight(),
3877 peer.m_headers_sync->GetPresyncTime()};
3878 }
3879
3880 // Update statistics in stats.
3881 LOCK(m_headers_presync_mutex);
3882 m_headers_presync_stats[pfrom.GetId()] = stats;
3883 auto best_it =
3884 m_headers_presync_stats.find(m_headers_presync_bestpeer);
3885 bool best_updated = false;
3886 if (best_it == m_headers_presync_stats.end()) {
3887 // If the cached best peer is outdated, iterate over all
3888 // remaining ones (including newly updated one) to find the best
3889 // one.
3890 NodeId peer_best{-1};
3891 const HeadersPresyncStats *stat_best{nullptr};
3892 for (const auto &[_peer, _stat] : m_headers_presync_stats) {
3893 if (!stat_best || _stat > *stat_best) {
3894 peer_best = _peer;
3895 stat_best = &_stat;
3896 }
3897 }
3898 m_headers_presync_bestpeer = peer_best;
3899 best_updated = (peer_best == pfrom.GetId());
3900 } else if (best_it->first == pfrom.GetId() ||
3901 stats > best_it->second) {
3902 // pfrom was and remains the best peer, or pfrom just became
3903 // best.
3904 m_headers_presync_bestpeer = pfrom.GetId();
3905 best_updated = true;
3906 }
3907 if (best_updated && stats.second.has_value()) {
3908 // If the best peer updated, and it is in its first phase,
3909 // signal.
3910 m_headers_presync_should_signal = true;
3911 }
3912 }
3913
3914 if (result.success) {
3915 // We only overwrite the headers passed in if processing was
3916 // successful.
3917 headers.swap(result.pow_validated_headers);
3918 }
3919
3920 return result.success;
3921 }
3922 // Either we didn't have a sync in progress, or something went wrong
3923 // processing these headers, or we are returning headers to the caller to
3924 // process.
3925 return false;
3926}
3927
3928bool PeerManagerImpl::TryLowWorkHeadersSync(
3929 Peer &peer, CNode &pfrom, const CBlockIndex *chain_start_header,
3930 std::vector<CBlockHeader> &headers) {
3931 // Calculate the total work on this chain.
3932 arith_uint256 total_work =
3933 chain_start_header->nChainWork + CalculateHeadersWork(headers);
3934
3935 // Our dynamic anti-DoS threshold (minimum work required on a headers chain
3936 // before we'll store it)
3937 arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
3938
3939 // Avoid DoS via low-difficulty-headers by only processing if the headers
3940 // are part of a chain with sufficient work.
3941 if (total_work < minimum_chain_work) {
3942 // Only try to sync with this peer if their headers message was full;
3943 // otherwise they don't have more headers after this so no point in
3944 // trying to sync their too-little-work chain.
3945 if (headers.size() == MAX_HEADERS_RESULTS) {
3946 // Note: we could advance to the last header in this set that is
3947 // known to us, rather than starting at the first header (which we
3948 // may already have); however this is unlikely to matter much since
3949 // ProcessHeadersMessage() already handles the case where all
3950 // headers in a received message are already known and are
3951 // ancestors of m_best_header or chainActive.Tip(), by skipping
3952 // this logic in that case. So even if the first header in this set
3953 // of headers is known, some header in this set must be new, so
3954 // advancing to the first unknown header would be a small effect.
3955 LOCK(peer.m_headers_sync_mutex);
3956 peer.m_headers_sync.reset(
3957 new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
3958 chain_start_header, minimum_chain_work));
3959
3960 // Now a HeadersSyncState object for tracking this synchronization
3961 // is created, process the headers using it as normal. Failures are
3962 // handled inside of IsContinuationOfLowWorkHeadersSync.
3963 (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
3964 } else {
3965 LogPrint(BCLog::NET,
3966 "Ignoring low-work chain (height=%u) from peer=%d\n",
3967 chain_start_header->nHeight + headers.size(),
3968 pfrom.GetId());
3969 }
3970 // The peer has not yet given us a chain that meets our work threshold,
3971 // so we want to prevent further processing of the headers in any case.
3972 headers = {};
3973 return true;
3974 }
3975
3976 return false;
3977}
3978
3979bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex *header) {
3980 return header != nullptr &&
3981 ((m_chainman.m_best_header != nullptr &&
3982 header ==
3983 m_chainman.m_best_header->GetAncestor(header->nHeight)) ||
3984 m_chainman.ActiveChain().Contains(header));
3985}
3986
3987bool PeerManagerImpl::MaybeSendGetHeaders(CNode &pfrom,
3988 const CBlockLocator &locator,
3989 Peer &peer) {
3990 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3991
3992 const auto current_time = NodeClock::now();
3993
3994 // Only allow a new getheaders message to go out if we don't have a recent
3995 // one already in-flight
3996 if (current_time - peer.m_last_getheaders_timestamp >
3997 HEADERS_RESPONSE_TIME) {
3998 m_connman.PushMessage(
3999 &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
4000 peer.m_last_getheaders_timestamp = current_time;
4001 return true;
4002 }
4003 return false;
4004}
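// MaybeSendGetHeaders() is a per-peer rate limiter: a new getheaders may
// only go out once HEADERS_RESPONSE_TIME has elapsed since the previous
// one, and m_last_getheaders_timestamp is reset to {} when a response
// arrives, which re-arms the limiter immediately. The pattern in isolation
// (names illustrative):
//
//     if (now - last_request_time > kResponseTimeout) {
//         SendRequest();
//         last_request_time = now; // block further sends for a while
//     }
//     // On response: last_request_time = {}; // allow the next send at once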
4005
4012void PeerManagerImpl::HeadersDirectFetchBlocks(const Config &config,
4013 CNode &pfrom,
4014 const CBlockIndex &last_header) {
4015 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
4016
4017 LOCK(cs_main);
4018 CNodeState *nodestate = State(pfrom.GetId());
4019
4020 if (CanDirectFetch() && last_header.IsValid(BlockValidity::TREE) &&
4021 m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
4022 std::vector<const CBlockIndex *> vToFetch;
4023 const CBlockIndex *pindexWalk{&last_header};
4024 // Calculate all the blocks we'd need to switch to last_header, up to
4025 // a limit.
4026 while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) &&
4027 vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4028 if (!pindexWalk->nStatus.hasData() &&
4029 !IsBlockRequested(pindexWalk->GetBlockHash())) {
4030 // We don't have this block, and it's not yet in flight.
4031 vToFetch.push_back(pindexWalk);
4032 }
4033 pindexWalk = pindexWalk->pprev;
4034 }
4035 // If pindexWalk still isn't on our main chain, we're looking at a
4036 // very large reorg at a time we think we're close to caught up to
4037 // the main chain -- this shouldn't really happen. Bail out on the
4038 // direct fetch and rely on parallel download instead.
4039 if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
4040 LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
4041 last_header.GetBlockHash().ToString(),
4042 last_header.nHeight);
4043 } else {
4044 std::vector<CInv> vGetData;
4045 // Download as much as possible, from earliest to latest.
4046 for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
4047 if (nodestate->vBlocksInFlight.size() >=
4048 MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4049 // Can't download any more from this peer
4050 break;
4051 }
4052 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
4053 BlockRequested(config, pfrom.GetId(), *pindex);
4054 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
4055 pindex->GetBlockHash().ToString(), pfrom.GetId());
4056 }
4057 if (vGetData.size() > 1) {
4058 LogPrint(BCLog::NET,
4059 "Downloading blocks toward %s (%d) via headers "
4060 "direct fetch\n",
4061 last_header.GetBlockHash().ToString(),
4062 last_header.nHeight);
4063 }
4064 if (vGetData.size() > 0) {
4065 if (!m_opts.ignore_incoming_txs &&
4066 nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
4067 mapBlocksInFlight.size() == 1 &&
4068 last_header.pprev->IsValid(BlockValidity::CHAIN)) {
4069 // In any case, we want to download using a compact
4070 // block, not a regular one.
4071 vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
4072 }
4073 m_connman.PushMessage(
4074 &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4075 }
4076 }
4077 }
4078}
4079
4085void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
4086 CNode &pfrom, Peer &peer, const CBlockIndex &last_header,
4087 bool received_new_header, bool may_have_more_headers) {
4088 LOCK(cs_main);
4089
4090 CNodeState *nodestate = State(pfrom.GetId());
4091
4092 UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());
4093
4094 // From here, pindexBestKnownBlock should be guaranteed to be non-null,
4095 // because it is set in UpdateBlockAvailability. Some nullptr checks are
4096 // still present, however, as belt-and-suspenders.
4097
4098 if (received_new_header &&
4099 last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
4100 nodestate->m_last_block_announcement = GetTime();
4101 }
4102
4103 // If we're in IBD, we want outbound peers that will serve us a useful
4104 // chain. Disconnect peers that are on chains with insufficient work.
4105 if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
4106 // When nCount < MAX_HEADERS_RESULTS, we know we have no more
4107 // headers to fetch from this peer.
4108 if (nodestate->pindexBestKnownBlock &&
4109 nodestate->pindexBestKnownBlock->nChainWork <
4110 m_chainman.MinimumChainWork()) {
4111 // This peer has too little work on their headers chain to help
4112 // us sync -- disconnect if it is an outbound disconnection
4113 // candidate.
4114 // Note: We compare their tip to the minimum chain work (rather than
4115 // m_chainman.ActiveChain().Tip()) because we won't start block
4116 // download until we have a headers chain that has at least
4117 // the minimum chain work, even if a peer has a chain past our tip,
4118 // as an anti-DoS measure.
4119 if (pfrom.IsOutboundOrBlockRelayConn()) {
4120 LogPrintf("Disconnecting outbound peer %d -- headers "
4121 "chain has insufficient work\n",
4122 pfrom.GetId());
4123 pfrom.fDisconnect = true;
4124 }
4125 }
4126 }
4127
4128 // If this is an outbound full-relay peer, check to see if we should
4129 // protect it from the bad/lagging chain logic.
4130 // Note that outbound block-relay peers are excluded from this
4131 // protection, and thus always subject to eviction under the bad/lagging
4132 // chain logic.
4133 // See ChainSyncTimeoutState.
4134 if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() &&
4135 nodestate->pindexBestKnownBlock != nullptr) {
4136 if (m_outbound_peers_with_protect_from_disconnect <
4137 MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT &&
4138 nodestate->pindexBestKnownBlock->nChainWork >=
4139 m_chainman.ActiveChain().Tip()->nChainWork &&
4140 !nodestate->m_chain_sync.m_protect) {
4141 LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n",
4142 pfrom.GetId());
4143 nodestate->m_chain_sync.m_protect = true;
4144 ++m_outbound_peers_with_protect_from_disconnect;
4145 }
4146 }
4147}
4148
4149void PeerManagerImpl::ProcessHeadersMessage(const Config &config, CNode &pfrom,
4150 Peer &peer,
4151 std::vector<CBlockHeader> &&headers,
4152 bool via_compact_block) {
4153 size_t nCount = headers.size();
4154
4155 if (nCount == 0) {
4156 // Nothing interesting. Stop asking this peer for more headers.
4157 // If we were in the middle of headers sync, receiving an empty headers
4158 // message suggests that the peer suddenly has nothing to give us
4159 // (perhaps it reorged to our chain). Clear download state for this
4160 // peer.
4161 LOCK(peer.m_headers_sync_mutex);
4162 if (peer.m_headers_sync) {
4163 peer.m_headers_sync.reset(nullptr);
4164 LOCK(m_headers_presync_mutex);
4165 m_headers_presync_stats.erase(pfrom.GetId());
4166 }
4167 // A headers message with no headers cannot be an announcement, so
4168 // assume it is a response to our last getheaders request, if there is
4169 // one.
4170 peer.m_last_getheaders_timestamp = {};
4171 return;
4172 }
4173
4174 // Before we do any processing, make sure these pass basic sanity checks.
4175 // We'll rely on headers having valid proof-of-work further down, as an
4176 // anti-DoS criteria (note: this check is required before passing any
4177 // headers into HeadersSyncState).
4178 if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
4179 // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
4180 // just return. (Note that even if a header is announced via compact
4181 // block, the header itself should be valid, so this type of error can
4182 // always be punished.)
4183 return;
4184 }
4185
4186 const CBlockIndex *pindexLast = nullptr;
4187
4188 // We'll set already_validated_work to true if these headers are
4189 // successfully processed as part of a low-work headers sync in progress
4190 // (either in PRESYNC or REDOWNLOAD phase).
4191     // If true, this will mean that any headers returned to us (i.e. during
4192 // REDOWNLOAD) can be validated without further anti-DoS checks.
4193 bool already_validated_work = false;
4194
4195 // If we're in the middle of headers sync, let it do its magic.
4196 bool have_headers_sync = false;
4197 {
4198 LOCK(peer.m_headers_sync_mutex);
4199
4200 already_validated_work =
4201 IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
4202
4203 // The headers we passed in may have been:
4204 // - untouched, perhaps if no headers-sync was in progress, or some
4205 // failure occurred
4206 // - erased, such as if the headers were successfully processed and no
4207 // additional headers processing needs to take place (such as if we
4208 // are still in PRESYNC)
4209 // - replaced with headers that are now ready for validation, such as
4210 // during the REDOWNLOAD phase of a low-work headers sync.
4211 // So just check whether we still have headers that we need to process,
4212 // or not.
4213 if (headers.empty()) {
4214 return;
4215 }
4216
4217 have_headers_sync = !!peer.m_headers_sync;
4218 }
4219
4220 // Do these headers connect to something in our block index?
4221 const CBlockIndex *chain_start_header{
4222         WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(
4223                                  headers[0].hashPrevBlock))};
4224 bool headers_connect_blockindex{chain_start_header != nullptr};
4225
4226 if (!headers_connect_blockindex) {
4227 // This could be a BIP 130 block announcement, use
4228 // special logic for handling headers that don't connect, as this
4229 // could be benign.
4230 HandleUnconnectingHeaders(pfrom, peer, headers);
4231 return;
4232 }
4233
4234 // If headers connect, assume that this is in response to any outstanding
4235 // getheaders request we may have sent, and clear out the time of our last
4236 // request. Non-connecting headers cannot be a response to a getheaders
4237 // request.
4238 peer.m_last_getheaders_timestamp = {};
4239
4240 // If the headers we received are already in memory and an ancestor of
4241 // m_best_header or our tip, skip anti-DoS checks. These headers will not
4242 // use any more memory (and we are not leaking information that could be
4243 // used to fingerprint us).
4244 const CBlockIndex *last_received_header{nullptr};
4245 {
4246 LOCK(cs_main);
4247 last_received_header =
4248 m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
4249 if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
4250 already_validated_work = true;
4251 }
4252 }
4253
4254 // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
4255 // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
4256 // on startup).
4257     if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
4258         already_validated_work = true;
4259 }
4260
4261 // At this point, the headers connect to something in our block index.
4262 // Do anti-DoS checks to determine if we should process or store for later
4263 // processing.
4264 if (!already_validated_work &&
4265 TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
4266 // If we successfully started a low-work headers sync, then there
4267 // should be no headers to process any further.
4268 Assume(headers.empty());
4269 return;
4270 }
4271
4272 // At this point, we have a set of headers with sufficient work on them
4273 // which can be processed.
4274
4275 // If we don't have the last header, then this peer will have given us
4276 // something new (if these headers are valid).
4277 bool received_new_header{last_received_header == nullptr};
4278
4279 // Now process all the headers.
4280     BlockValidationState state;
4281     if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true,
4282 state, &pindexLast)) {
4283 if (state.IsInvalid()) {
4284 MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
4285 "invalid header received");
4286 return;
4287 }
4288 }
4289 assert(pindexLast);
4290
4291 // Consider fetching more headers if we are not using our headers-sync
4292 // mechanism.
4293 if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
4294 // Headers message had its maximum size; the peer may have more headers.
4295 if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
4296 LogPrint(
4297 BCLog::NET,
4298 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
4299 pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
4300 }
4301 }
4302
4303 UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast,
4304 received_new_header,
4305 nCount == MAX_HEADERS_RESULTS);
4306
4307 // Consider immediately downloading blocks.
4308 HeadersDirectFetchBlocks(config, pfrom, *pindexLast);
4309}
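// Taken together, ProcessHeadersMessage() only hands a batch to
// ProcessNewBlockHeaders() once it has passed the proof-of-work sanity
// check, connects to our block index, and either carries sufficient chain
// work, belongs to an in-progress headers sync, or comes from a NoBan peer.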
4310
4311void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid,
4312 const CTransactionRef &ptx,
4313 const TxValidationState &state,
4314 bool maybe_add_extra_compact_tx) {
4315 AssertLockNotHeld(m_peer_mutex);
4316 AssertLockHeld(g_msgproc_mutex);
4317     AssertLockHeld(cs_main);
4318
4319 const TxId &txid = ptx->GetId();
4320
4321 LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n",
4322 txid.ToString(), nodeid, state.ToString());
4323
4324     if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
4325         return;
4326 }
4327
4328 if (m_avalanche &&
4329 m_avalanche->isPreconsensusActivated(m_chainman.ActiveTip()) &&
4330         state.GetResult() == TxValidationResult::TX_CONFLICT) {
4331         return;
4332 }
4333
4334     if (state.GetResult() == TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
4335         // If the result is TX_PACKAGE_RECONSIDERABLE, add it to
4336 // m_recent_rejects_package_reconsiderable because we should not
4337 // download or submit this transaction by itself again, but may submit
4338 // it as part of a package later.
4339 m_recent_rejects_package_reconsiderable.insert(txid);
4340 } else {
4341 m_recent_rejects.insert(txid);
4342 }
4343 m_txrequest.ForgetInvId(txid);
4344
4345 if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
4346 AddToCompactExtraTransactions(ptx);
4347 }
4348
4349 MaybePunishNodeForTx(nodeid, state);
4350
4351 // If the tx failed in ProcessOrphanTx, it should be removed from the
4352 // orphanage unless the tx was still missing inputs. If the tx was not in
4353 // the orphanage, EraseTx does nothing and returns 0.
4354 if (m_mempool.withOrphanage([&txid](TxOrphanage &orphanage) {
4355 return orphanage.EraseTx(txid);
4356 }) > 0) {
4357 LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s\n",
4358 txid.ToString());
4359 }
4360}
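// Note the division of labor above: txids that should never be retried on
// their own go into m_recent_rejects, while TX_PACKAGE_RECONSIDERABLE
// failures go into m_recent_rejects_package_reconsiderable so the same
// transaction can still be resubmitted later as part of a 1-parent-1-child
// package (e.g. a low-feerate parent that a child might CPFP).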
4361
4362void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef &tx) {
4363 AssertLockNotHeld(m_peer_mutex);
4364 AssertLockHeld(g_msgproc_mutex);
4365     AssertLockHeld(cs_main);
4366
4367 // As this version of the transaction was acceptable, we can forget about
4368 // any requests for it. No-op if the tx is not in txrequest.
4369 m_txrequest.ForgetInvId(tx->GetId());
4370
4371 m_mempool.withOrphanage([&tx](TxOrphanage &orphanage) {
4372 orphanage.AddChildrenToWorkSet(*tx);
4373 // If it came from the orphanage, remove it. No-op if the tx is not in
4374 // txorphanage.
4375 orphanage.EraseTx(tx->GetId());
4376 });
4377
4378 LogPrint(
4379         BCLog::MEMPOOL,
4380         "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
4381 nodeid, tx->GetId().ToString(), m_mempool.size(),
4382 m_mempool.DynamicMemoryUsage() / 1000);
4383
4384 RelayTransaction(tx->GetId());
4385}
4386
4387void PeerManagerImpl::ProcessPackageResult(
4388 const PackageToValidate &package_to_validate,
4389 const PackageMempoolAcceptResult &package_result) {
4390 AssertLockNotHeld(m_peer_mutex);
4391 AssertLockHeld(g_msgproc_mutex);
4392     AssertLockHeld(cs_main);
4393
4394 const auto &package = package_to_validate.m_txns;
4395 const auto &senders = package_to_validate.m_senders;
4396
4397 if (package_result.m_state.IsInvalid()) {
4398 m_recent_rejects_package_reconsiderable.insert(GetPackageHash(package));
4399 }
4400 // We currently only expect to process 1-parent-1-child packages. Remove if
4401 // this changes.
4402 if (!Assume(package.size() == 2)) {
4403 return;
4404 }
4405
4406 // Iterate backwards to erase in-package descendants from the orphanage
4407 // before they become relevant in AddChildrenToWorkSet.
4408 auto package_iter = package.rbegin();
4409 auto senders_iter = senders.rbegin();
4410 while (package_iter != package.rend()) {
4411 const auto &tx = *package_iter;
4412 const NodeId nodeid = *senders_iter;
4413 const auto it_result{package_result.m_tx_results.find(tx->GetId())};
4414
4415 // It is not guaranteed that a result exists for every transaction.
4416 if (it_result != package_result.m_tx_results.end()) {
4417 const auto &tx_result = it_result->second;
4418 switch (tx_result.m_result_type) {
4419                 case MempoolAcceptResult::ResultType::VALID: {
4420                     ProcessValidTx(nodeid, tx);
4421 break;
4422 }
4423                 case MempoolAcceptResult::ResultType::INVALID: {
4424                     // Don't add to vExtraTxnForCompact, as these transactions
4425 // should have already been added there when added to the
4426 // orphanage or rejected for TX_PACKAGE_RECONSIDERABLE.
4427 // This should be updated if package submission is ever used
4428 // for transactions that haven't already been validated
4429 // before.
4430 ProcessInvalidTx(nodeid, tx, tx_result.m_state,
4431 /*maybe_add_extra_compact_tx=*/false);
4432 break;
4433 }
4434                 case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: {
4435                     // AlreadyHaveTx() should be catching transactions that are
4436 // already in mempool.
4437 Assume(false);
4438 break;
4439 }
4440 }
4441 }
4442 package_iter++;
4443 senders_iter++;
4444 }
4445}
4446
4447std::optional<PeerManagerImpl::PackageToValidate>
4448PeerManagerImpl::Find1P1CPackage(const CTransactionRef &ptx, NodeId nodeid) {
4449 AssertLockNotHeld(m_peer_mutex);
4450 AssertLockHeld(g_msgproc_mutex);
4451     AssertLockHeld(cs_main);
4452
4453 const auto &parent_txid{ptx->GetId()};
4454
4455 Assume(m_recent_rejects_package_reconsiderable.contains(parent_txid));
4456
4457 // Prefer children from this peer. This helps prevent censorship attempts in
4458 // which an attacker sends lots of fake children for the parent, and we
4459 // (unluckily) keep selecting the fake children instead of the real one
4460 // provided by the honest peer.
4461 const auto cpfp_candidates_same_peer{
4462 m_mempool.withOrphanage([&ptx, nodeid](const TxOrphanage &orphanage) {
4463 return orphanage.GetChildrenFromSamePeer(ptx, nodeid);
4464 })};
4465
4466 // These children should be sorted from newest to oldest.
4467 for (const auto &child : cpfp_candidates_same_peer) {
4468 Package maybe_cpfp_package{ptx, child};
4469 if (!m_recent_rejects_package_reconsiderable.contains(
4470 GetPackageHash(maybe_cpfp_package))) {
4471 return PeerManagerImpl::PackageToValidate{ptx, child, nodeid,
4472 nodeid};
4473 }
4474 }
4475
4476 // If no suitable candidate from the same peer is found, also try children
4477 // that were provided by a different peer. This is useful because sometimes
4478 // multiple peers announce both transactions to us, and we happen to
4479 // download them from different peers (we wouldn't have known that these 2
4480 // transactions are related). We still want to find 1p1c packages then.
4481 //
4482 // If we start tracking all announcers of orphans, we can restrict this
4483 // logic to parent + child pairs in which both were provided by the same
4484 // peer, i.e. delete this step.
4485 const auto cpfp_candidates_different_peer{
4486 m_mempool.withOrphanage([&ptx, nodeid](const TxOrphanage &orphanage) {
4487 return orphanage.GetChildrenFromDifferentPeer(ptx, nodeid);
4488 })};
4489
4490 // Find the first 1p1c that hasn't already been rejected. We randomize the
4491 // order to not create a bias that attackers can use to delay package
4492 // acceptance.
4493 //
4494 // Create a random permutation of the indices.
4495 std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
4496 std::iota(tx_indices.begin(), tx_indices.end(), 0);
4497 Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
4498
4499 for (const auto index : tx_indices) {
4500 // If we already tried a package and failed for any reason, the combined
4501 // hash was cached in m_recent_rejects_package_reconsiderable.
4502 const auto [child_tx, child_sender] =
4503 cpfp_candidates_different_peer.at(index);
4504 Package maybe_cpfp_package{ptx, child_tx};
4505 if (!m_recent_rejects_package_reconsiderable.contains(
4506 GetPackageHash(maybe_cpfp_package))) {
4507 return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid,
4508 child_sender};
4509 }
4510 }
4511 return std::nullopt;
4512}
4513
4514bool PeerManagerImpl::ProcessOrphanTx(const Config &config, Peer &peer) {
4515 AssertLockHeld(g_msgproc_mutex);
4516 LOCK(cs_main);
4517
4518 while (CTransactionRef porphanTx =
4519 m_mempool.withOrphanage([&peer](TxOrphanage &orphanage) {
4520 return orphanage.GetTxToReconsider(peer.m_id);
4521 })) {
4522 const MempoolAcceptResult result =
4523 m_chainman.ProcessTransaction(porphanTx);
4524 const TxValidationState &state = result.m_state;
4525 const TxId &orphanTxId = porphanTx->GetId();
4526
4527         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
4528             LogPrint(BCLog::TXPACKAGES, "   accepted orphan tx %s\n",
4528 LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s\n",
4529 orphanTxId.ToString());
4530 ProcessValidTx(peer.m_id, porphanTx);
4531 return true;
4532 }
4533
4536 " invalid orphan tx %s from peer=%d. %s\n",
4537 orphanTxId.ToString(), peer.m_id, state.ToString());
4538
4539 if (Assume(state.IsInvalid() &&
4540                        state.GetResult() != TxValidationResult::TX_NO_MEMPOOL &&
4541                        state.GetResult() !=
4542                            TxValidationResult::TX_RESULT_UNSET)) {
4543 ProcessInvalidTx(peer.m_id, porphanTx, state,
4544 /*maybe_add_extra_compact_tx=*/false);
4545 }
4546
4547 return true;
4548 }
4549 }
4550
4551 return false;
4552}
4553
4554bool PeerManagerImpl::PrepareBlockFilterRequest(
4555 CNode &node, Peer &peer, BlockFilterType filter_type, uint32_t start_height,
4556 const BlockHash &stop_hash, uint32_t max_height_diff,
4557 const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
4558 const bool supported_filter_type =
4559 (filter_type == BlockFilterType::BASIC &&
4560 (peer.m_our_services & NODE_COMPACT_FILTERS));
4561 if (!supported_filter_type) {
4562         LogPrint(BCLog::NET,
4563                  "peer %d requested unsupported block filter type: %d\n",
4564 node.GetId(), static_cast<uint8_t>(filter_type));
4565 node.fDisconnect = true;
4566 return false;
4567 }
4568
4569 {
4570 LOCK(cs_main);
4571 stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
4572
4573 // Check that the stop block exists and the peer would be allowed to
4574 // fetch it.
4575 if (!stop_index || !BlockRequestAllowed(stop_index)) {
4576 LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
4577 node.GetId(), stop_hash.ToString());
4578 node.fDisconnect = true;
4579 return false;
4580 }
4581 }
4582
4583 uint32_t stop_height = stop_index->nHeight;
4584 if (start_height > stop_height) {
4585 LogPrint(
4586 BCLog::NET,
4587 "peer %d sent invalid getcfilters/getcfheaders with " /* Continued
4588 */
4589 "start height %d and stop height %d\n",
4590 node.GetId(), start_height, stop_height);
4591 node.fDisconnect = true;
4592 return false;
4593 }
4594 if (stop_height - start_height >= max_height_diff) {
4595         LogPrint(BCLog::NET,
4596                  "peer %d requested too many cfilters/cfheaders: %d / %d\n",
4597 node.GetId(), stop_height - start_height + 1, max_height_diff);
4598 node.fDisconnect = true;
4599 return false;
4600 }
4601
4602 filter_index = GetBlockFilterIndex(filter_type);
4603 if (!filter_index) {
4604 LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
4605 BlockFilterTypeName(filter_type));
4606 return false;
4607 }
4608
4609 return true;
4610}
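// PrepareBlockFilterRequest() centralizes the validation shared by the
// getcfilters/getcfheaders/getcfcheckpt handlers below: the filter type must
// be one we serve, the stop hash must refer to a block the peer may request,
// the height range must not exceed max_height_diff, and the corresponding
// filter index must exist. Every failure except a missing index disconnects
// the requesting peer.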
4611
4612void PeerManagerImpl::ProcessGetCFilters(CNode &node, Peer &peer,
4613 CDataStream &vRecv) {
4614 uint8_t filter_type_ser;
4615 uint32_t start_height;
4616 BlockHash stop_hash;
4617
4618 vRecv >> filter_type_ser >> start_height >> stop_hash;
4619
4620 const BlockFilterType filter_type =
4621 static_cast<BlockFilterType>(filter_type_ser);
4622
4623 const CBlockIndex *stop_index;
4624 BlockFilterIndex *filter_index;
4625 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
4626 stop_hash, MAX_GETCFILTERS_SIZE, stop_index,
4627 filter_index)) {
4628 return;
4629 }
4630
4631 std::vector<BlockFilter> filters;
4632 if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
4633         LogPrint(BCLog::NET,
4634                  "Failed to find block filter in index: filter_type=%s, "
4635 "start_height=%d, stop_hash=%s\n",
4636 BlockFilterTypeName(filter_type), start_height,
4637 stop_hash.ToString());
4638 return;
4639 }
4640
4641 for (const auto &filter : filters) {
4642 CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
4643 .Make(NetMsgType::CFILTER, filter);
4644 m_connman.PushMessage(&node, std::move(msg));
4645 }
4646}
4647
4648void PeerManagerImpl::ProcessGetCFHeaders(CNode &node, Peer &peer,
4649 CDataStream &vRecv) {
4650 uint8_t filter_type_ser;
4651 uint32_t start_height;
4652 BlockHash stop_hash;
4653
4654 vRecv >> filter_type_ser >> start_height >> stop_hash;
4655
4656 const BlockFilterType filter_type =
4657 static_cast<BlockFilterType>(filter_type_ser);
4658
4659 const CBlockIndex *stop_index;
4660 BlockFilterIndex *filter_index;
4661 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
4662 stop_hash, MAX_GETCFHEADERS_SIZE, stop_index,
4663 filter_index)) {
4664 return;
4665 }
4666
4667 uint256 prev_header;
4668 if (start_height > 0) {
4669 const CBlockIndex *const prev_block =
4670 stop_index->GetAncestor(static_cast<int>(start_height - 1));
4671 if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
4672             LogPrint(BCLog::NET,
4673                      "Failed to find block filter header in index: "
4674 "filter_type=%s, block_hash=%s\n",
4675 BlockFilterTypeName(filter_type),
4676 prev_block->GetBlockHash().ToString());
4677 return;
4678 }
4679 }
4680
4681 std::vector<uint256> filter_hashes;
4682 if (!filter_index->LookupFilterHashRange(start_height, stop_index,
4683 filter_hashes)) {
4684         LogPrint(BCLog::NET,
4685                  "Failed to find block filter hashes in index: filter_type=%s, "
4686 "start_height=%d, stop_hash=%s\n",
4687 BlockFilterTypeName(filter_type), start_height,
4688 stop_hash.ToString());
4689 return;
4690 }
4691
4692 CSerializedNetMsg msg =
4693 CNetMsgMaker(node.GetCommonVersion())
4694 .Make(NetMsgType::CFHEADERS, filter_type_ser,
4695 stop_index->GetBlockHash(), prev_header, filter_hashes);
4696 m_connman.PushMessage(&node, std::move(msg));
4697}
4698
4699void PeerManagerImpl::ProcessGetCFCheckPt(CNode &node, Peer &peer,
4700 CDataStream &vRecv) {
4701 uint8_t filter_type_ser;
4702 BlockHash stop_hash;
4703
4704 vRecv >> filter_type_ser >> stop_hash;
4705
4706 const BlockFilterType filter_type =
4707 static_cast<BlockFilterType>(filter_type_ser);
4708
4709 const CBlockIndex *stop_index;
4710 BlockFilterIndex *filter_index;
4711 if (!PrepareBlockFilterRequest(
4712 node, peer, filter_type, /*start_height=*/0, stop_hash,
4713 /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
4714 stop_index, filter_index)) {
4715 return;
4716 }
4717
4718 std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
4719
4720 // Populate headers.
4721 const CBlockIndex *block_index = stop_index;
4722 for (int i = headers.size() - 1; i >= 0; i--) {
4723 int height = (i + 1) * CFCHECKPT_INTERVAL;
4724 block_index = block_index->GetAncestor(height);
4725
4726 if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
4727             LogPrint(BCLog::NET,
4728                      "Failed to find block filter header in index: "
4729 "filter_type=%s, block_hash=%s\n",
4730 BlockFilterTypeName(filter_type),
4731 block_index->GetBlockHash().ToString());
4732 return;
4733 }
4734 }
4735
4736 CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
4737 .Make(NetMsgType::CFCHECKPT, filter_type_ser,
4738 stop_index->GetBlockHash(), headers);
4739 m_connman.PushMessage(&node, std::move(msg));
4740}
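// The loop above returns one filter header for every CFCHECKPT_INTERVAL
// blocks up to the stop block. For example, with the BIP 157 interval of
// 1,000 and a stop block at height 2,500, headers are returned for heights
// 1,000 and 2,000.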
4741
4742bool IsAvalancheMessageType(const std::string &msg_type) {
4743 return msg_type == NetMsgType::AVAHELLO ||
4744 msg_type == NetMsgType::AVAPOLL ||
4745 msg_type == NetMsgType::AVARESPONSE ||
4746 msg_type == NetMsgType::AVAPROOF ||
4747 msg_type == NetMsgType::GETAVAADDR ||
4748 msg_type == NetMsgType::GETAVAPROOFS ||
4749 msg_type == NetMsgType::AVAPROOFS ||
4750 msg_type == NetMsgType::AVAPROOFSREQ;
4751}
4752
4753uint32_t
4754PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) const {
4755     AssertLockHeld(cs_main);
4756
4757 const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
4758
4759 // Unknown block.
4760 if (!pindex) {
4761 return -1;
4762 }
4763
4764 // Invalid block
4765 if (pindex->nStatus.isInvalid()) {
4766 return 1;
4767 }
4768
4769 // Parked block
4770 if (pindex->nStatus.isOnParkedChain()) {
4771 return 2;
4772 }
4773
4774 const CBlockIndex *pindexTip = m_chainman.ActiveChain().Tip();
4775 const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip);
4776
4777 // Active block.
4778 if (pindex == pindexFork) {
4779 return 0;
4780 }
4781
4782 // Fork block.
4783 if (pindexFork != pindexTip) {
4784 return 3;
4785 }
4786
4787 // Missing block data.
4788 if (!pindex->nStatus.hasData()) {
4789 return -2;
4790 }
4791
4792 // This block is built on top of the tip, we have the data, it
4793 // is pending connection or rejection.
4794 return -3;
4795};
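// Summary of the block vote codes returned above: 0 = accepted (on the
// active chain), 1 = invalid, 2 = parked, 3 = on a fork, -1 = unknown block,
// -2 = block data missing, -3 = pending connection or rejection.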
4796
4797uint32_t
4798PeerManagerImpl::GetAvalancheVoteForTx(const avalanche::Processor &avalanche,
4799 const TxId &id) const {
4800 // Recently confirmed
4801 if (WITH_LOCK(m_recent_confirmed_transactions_mutex,
4802 return m_recent_confirmed_transactions.contains(id))) {
4803 return 0;
4804 }
4805
4806 CTransactionRef mempool_tx;
4807 {
4808 LOCK(::cs_main);
4809
4810 // Invalid tx. m_recent_rejects needs cs_main
4811 if (m_recent_rejects.contains(id)) {
4812 return 1;
4813 }
4814
4815 LOCK(m_mempool.cs);
4816
4817 // Finalized
4818 if (m_mempool.isAvalancheFinalizedPreConsensus(id)) {
4819 return 0;
4820 }
4821
4822 // Accepted in mempool
4823 if (auto iter = m_mempool.GetIter(id)) {
4824 mempool_tx = (**iter)->GetSharedTx();
4825 } else {
4826 // Conflicting tx
4827 if (m_mempool.withConflicting(
4828 [&id](const TxConflicting &conflicting) {
4829 return conflicting.HaveTx(id);
4830 })) {
4831 return 2;
4832 }
4833
4834 // Orphan tx
4835 if (m_mempool.withOrphanage([&id](const TxOrphanage &orphanage) {
4836 return orphanage.HaveTx(id);
4837 })) {
4838 return -2;
4839 }
4840 }
4841 } // release cs_main and mempool.cs locks
4842
4843     // isPolled() accesses the vote records, and should be called with
4844     // cs_main released.
4845 // If the tx is in the mempool...
4846 if (mempool_tx) {
4847 // ... and in the polled list
4848 if (avalanche.isPolled(mempool_tx)) {
4849 return 0;
4850 }
4851
4852 // ... but not in the polled list
4853 return -3;
4854 }
4855
4856 // Unknown tx
4857 return -1;
4858};
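// Summary of the transaction vote codes returned above: 0 = accepted
// (recently confirmed, finalized, or in the mempool and polled),
// 1 = rejected, 2 = conflicting, -1 = unknown, -2 = orphan,
// -3 = in the mempool but not in the polled list.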
4859
4865 uint32_t
4866 PeerManagerImpl::GetAvalancheVoteForProof(const avalanche::Processor &avalanche,
4867                                           const avalanche::ProofId &id) {
4868 return avalanche.withPeerManager([&id](avalanche::PeerManager &pm) {
4869 // Rejected proof
4870 if (pm.isInvalid(id)) {
4871 return 1;
4872 }
4873
4874 // The proof is actively bound to a peer
4875 if (pm.isBoundToPeer(id)) {
4876 return 0;
4877 }
4878
4879 // Unknown proof
4880 if (!pm.exists(id)) {
4881 return -1;
4882 }
4883
4884 // Immature proof
4885 if (pm.isImmature(id)) {
4886 return 2;
4887 }
4888
4889 // Not immature, but in conflict with an actively bound proof
4890 if (pm.isInConflictingPool(id)) {
4891 return 3;
4892 }
4893
4894 // The proof is known, not rejected, not immature, not a conflict, but
4895 // for some reason unbound. This should not happen if the above pools
4896 // are managed correctly, but added for robustness.
4897 return -2;
4898 });
4899};
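// Summary of the proof vote codes returned above: 0 = bound to a peer,
// 1 = invalid, 2 = immature, 3 = conflicting, -1 = unknown,
// -2 = known but unexpectedly unbound.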
4900
4901void PeerManagerImpl::ProcessBlock(const Config &config, CNode &node,
4902 const std::shared_ptr<const CBlock> &block,
4903 bool force_processing,
4904 bool min_pow_checked) {
4905 bool new_block{false};
4906 m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked,
4907 &new_block, m_avalanche);
4908 if (new_block) {
4909 node.m_last_block_time = GetTime<std::chrono::seconds>();
4910 // In case this block came from a different peer than we requested
4911 // from, we can erase the block request now anyway (as we just stored
4912 // this block to disk).
4913 LOCK(cs_main);
4914 RemoveBlockRequest(block->GetHash(), std::nullopt);
4915 } else {
4916 LOCK(cs_main);
4917 mapBlockSource.erase(block->GetHash());
4918 }
4919}
4920
4921void PeerManagerImpl::ProcessMessage(
4922 const Config &config, CNode &pfrom, const std::string &msg_type,
4923 CDataStream &vRecv, const std::chrono::microseconds time_received,
4924 const std::atomic<bool> &interruptMsgProc) {
4925 AssertLockHeld(g_msgproc_mutex);
4926
4927 LogPrint(BCLog::NETDEBUG, "received: %s (%u bytes) peer=%d\n",
4928 SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
4929
4930 PeerRef peer = GetPeerRef(pfrom.GetId());
4931 if (peer == nullptr) {
4932 return;
4933 }
4934
4935 if (!m_avalanche && IsAvalancheMessageType(msg_type)) {
4936         LogPrint(BCLog::AVALANCHE,
4937                  "Avalanche is not initialized, ignoring %s message\n",
4938 msg_type);
4939 return;
4940 }
4941
4942 if (msg_type == NetMsgType::VERSION) {
4943 // Each connection can only send one version message
4944 if (pfrom.nVersion != 0) {
4945 LogPrint(BCLog::NET, "redundant version message from peer=%d\n",
4946 pfrom.GetId());
4947 return;
4948 }
4949
4950 int64_t nTime;
4951 CService addrMe;
4952 uint64_t nNonce = 1;
4953 ServiceFlags nServices;
4954 int nVersion;
4955 std::string cleanSubVer;
4956 int starting_height = -1;
4957 bool fRelay = true;
4958 uint64_t nExtraEntropy = 1;
4959
4960 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
4961 if (nTime < 0) {
4962 nTime = 0;
4963 }
4964 // Ignore the addrMe service bits sent by the peer
4965 vRecv.ignore(8);
4966 vRecv >> WithParams(CNetAddr::V1, addrMe);
4967 if (!pfrom.IsInboundConn()) {
4968 m_addrman.SetServices(pfrom.addr, nServices);
4969 }
4970 if (pfrom.ExpectServicesFromConn() &&
4971 !HasAllDesirableServiceFlags(nServices)) {
4972             LogPrint(BCLog::NET,
4973                      "peer=%d does not offer the expected services "
4974 "(%08x offered, %08x expected); disconnecting\n",
4975 pfrom.GetId(), nServices,
4976 GetDesirableServiceFlags(nServices));
4977 pfrom.fDisconnect = true;
4978 return;
4979 }
4980
4981 if (pfrom.IsAvalancheOutboundConnection() &&
4982 !(nServices & NODE_AVALANCHE)) {
4983 LogPrint(
4984                 BCLog::AVALANCHE,
4985                 "peer=%d does not offer the avalanche service; disconnecting\n",
4986 pfrom.GetId());
4987 pfrom.fDisconnect = true;
4988 return;
4989 }
4990
4991 if (nVersion < MIN_PEER_PROTO_VERSION) {
4992 // disconnect from peers older than this proto version
4993             LogPrint(BCLog::NET,
4994                      "peer=%d using obsolete version %i; disconnecting\n",
4995 pfrom.GetId(), nVersion);
4996 pfrom.fDisconnect = true;
4997 return;
4998 }
4999
5000 if (!vRecv.empty()) {
5001 // The version message includes information about the sending node
5002 // which we don't use:
5003 // - 8 bytes (service bits)
5004 // - 16 bytes (ipv6 address)
5005 // - 2 bytes (port)
5006 vRecv.ignore(26);
5007 vRecv >> nNonce;
5008 }
5009 if (!vRecv.empty()) {
5010 std::string strSubVer;
5011 vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
5012 cleanSubVer = SanitizeString(strSubVer);
5013 }
5014 if (!vRecv.empty()) {
5015 vRecv >> starting_height;
5016 }
5017 if (!vRecv.empty()) {
5018 vRecv >> fRelay;
5019 }
5020 if (!vRecv.empty()) {
5021 vRecv >> nExtraEntropy;
5022 }
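        // Note: every field after nNonce is optional on the wire, so each
        // read above is guarded by a !vRecv.empty() check; older peers that
        // omit trailing fields keep the defaults initialized above.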
5023 // Disconnect if we connected to ourself
5024 if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
5025 LogPrintf("connected to self at %s, disconnecting\n",
5026 pfrom.addr.ToString());
5027 pfrom.fDisconnect = true;
5028 return;
5029 }
5030
5031 if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
5032 SeenLocal(addrMe);
5033 }
5034
5035 // Inbound peers send us their version message when they connect.
5036 // We send our version message in response.
5037 if (pfrom.IsInboundConn()) {
5038 PushNodeVersion(config, pfrom, *peer);
5039 }
5040
5041 // Change version
5042 const int greatest_common_version =
5043 std::min(nVersion, PROTOCOL_VERSION);
5044 pfrom.SetCommonVersion(greatest_common_version);
5045 pfrom.nVersion = nVersion;
5046
5047 const CNetMsgMaker msg_maker(greatest_common_version);
5048
5049 m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
5050
5051 // Signal ADDRv2 support (BIP155).
5052 m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
5053
5054         pfrom.m_has_all_wanted_services =
5055             HasAllDesirableServiceFlags(nServices);
5056 peer->m_their_services = nServices;
5057 pfrom.SetAddrLocal(addrMe);
5058 {
5059 LOCK(pfrom.m_subver_mutex);
5060 pfrom.cleanSubVer = cleanSubVer;
5061 }
5062 peer->m_starting_height = starting_height;
5063
5064 // Only initialize the m_tx_relay data structure if:
5065 // - this isn't an outbound block-relay-only connection; and
5066 // - this isn't an outbound feeler connection, and
5067 // - fRelay=true or we're offering NODE_BLOOM to this peer
5068 // (NODE_BLOOM means that the peer may turn on tx relay later)
5069 if (!pfrom.IsBlockOnlyConn() && !pfrom.IsFeelerConn() &&
5070 (fRelay || (peer->m_our_services & NODE_BLOOM))) {
5071 auto *const tx_relay = peer->SetTxRelay();
5072 {
5073 LOCK(tx_relay->m_bloom_filter_mutex);
5074 // set to true after we get the first filter* message
5075 tx_relay->m_relay_txs = fRelay;
5076 }
5077 if (fRelay) {
5078 pfrom.m_relays_txs = true;
5079 }
5080 }
5081
5082 pfrom.nRemoteHostNonce = nNonce;
5083 pfrom.nRemoteExtraEntropy = nExtraEntropy;
5084
5085 // Potentially mark this peer as a preferred download peer.
5086 {
5087 LOCK(cs_main);
5088 CNodeState *state = State(pfrom.GetId());
5089 state->fPreferredDownload =
5090 (!pfrom.IsInboundConn() ||
5091              pfrom.HasPermission(NetPermissionFlags::NoBan)) &&
5092             !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
5093 m_num_preferred_download_peers += state->fPreferredDownload;
5094 }
5095
5096 // Attempt to initialize address relay for outbound peers and use result
5097 // to decide whether to send GETADDR, so that we don't send it to
5098 // inbound or outbound block-relay-only peers.
5099 bool send_getaddr{false};
5100 if (!pfrom.IsInboundConn()) {
5101 send_getaddr = SetupAddressRelay(pfrom, *peer);
5102 }
5103 if (send_getaddr) {
5104 // Do a one-time address fetch to help populate/update our addrman.
5105 // If we're starting up for the first time, our addrman may be
5106 // pretty empty, so this mechanism is important to help us connect
5107 // to the network.
5108 // We skip this for block-relay-only peers. We want to avoid
5109 // potentially leaking addr information and we do not want to
5110 // indicate to the peer that we will participate in addr relay.
5111 m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version)
5112 .Make(NetMsgType::GETADDR));
5113 peer->m_getaddr_sent = true;
5114 // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND
5115 // addresses in response (bypassing the
5116 // MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
5117 WITH_LOCK(peer->m_addr_token_bucket_mutex,
5118 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
5119 }
5120
5121 if (!pfrom.IsInboundConn()) {
5122 // For non-inbound connections, we update the addrman to record
5123 // connection success so that addrman will have an up-to-date
5124 // notion of which peers are online and available.
5125 //
5126 // While we strive to not leak information about block-relay-only
5127 // connections via the addrman, not moving an address to the tried
5128 // table is also potentially detrimental because new-table entries
5129 // are subject to eviction in the event of addrman collisions. We
5130 // mitigate the information-leak by never calling
5131 // AddrMan::Connected() on block-relay-only peers; see
5132 // FinalizeNode().
5133 //
5134 // This moves an address from New to Tried table in Addrman,
5135 // resolves tried-table collisions, etc.
5136 m_addrman.Good(pfrom.addr);
5137 }
5138
5139 std::string remoteAddr;
5140 if (fLogIPs) {
5141 remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
5142 }
5143
5144         LogPrint(BCLog::NET,
5145                  "receive version message: [%s] %s: version %d, blocks=%d, "
5146 "us=%s, txrelay=%d, peer=%d%s\n",
5147 pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion,
5148 peer->m_starting_height, addrMe.ToString(), fRelay,
5149 pfrom.GetId(), remoteAddr);
5150
5151 int64_t currentTime = GetTime();
5152 int64_t nTimeOffset = nTime - currentTime;
5153 pfrom.nTimeOffset = nTimeOffset;
5154 if (nTime < int64_t(m_chainparams.GenesisBlock().nTime)) {
5155 // Ignore time offsets that are improbable (before the Genesis
5156 // block) and may underflow our adjusted time.
5157 Misbehaving(*peer, "Ignoring invalid timestamp in version message");
5158 } else if (!pfrom.IsInboundConn()) {
5159 // Don't use timedata samples from inbound peers to make it
5160 // harder for others to tamper with our adjusted time.
5161 AddTimeData(pfrom.addr, nTimeOffset);
5162 }
5163
5164 // Feeler connections exist only to verify if address is online.
5165 if (pfrom.IsFeelerConn()) {
5166             LogPrint(BCLog::NET,
5167                      "feeler connection completed peer=%d; disconnecting\n",
5168 pfrom.GetId());
5169 pfrom.fDisconnect = true;
5170 }
5171 return;
5172 }
5173
5174 if (pfrom.nVersion == 0) {
5175 // Must have a version message before anything else
5176 Misbehaving(*peer, "non-version message before version handshake");
5177 return;
5178 }
5179
5180 // At this point, the outgoing message serialization version can't change.
5181 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
5182
5183 if (msg_type == NetMsgType::VERACK) {
5184 if (pfrom.fSuccessfullyConnected) {
5185             LogPrint(BCLog::NET,
5186                      "ignoring redundant verack message from peer=%d\n",
5187 pfrom.GetId());
5188 return;
5189 }
5190
5191 if (!pfrom.IsInboundConn()) {
5192 LogPrintf(
5193 "New outbound peer connected: version: %d, blocks=%d, "
5194 "peer=%d%s (%s)\n",
5195 pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(),
5196 (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString())
5197 : ""),
5198 pfrom.ConnectionTypeAsString());
5199 }
5200
5201         if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
5202             // Tell our peer we are willing to provide version 1
5203 // cmpctblocks. However, we do not request new block announcements
5204 // using cmpctblock messages. We send this to non-NODE NETWORK peers
5205 // as well, because they may wish to request compact blocks from us.
5206 m_connman.PushMessage(
5207 &pfrom,
5208 msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false,
5209 /*version=*/CMPCTBLOCKS_VERSION));
5210 }
5211
5212 if (m_avalanche) {
5213 if (m_avalanche->sendHello(&pfrom)) {
5214 auto localProof = m_avalanche->getLocalProof();
5215
5216 if (localProof) {
5217 AddKnownProof(*peer, localProof->getId());
5218 // Add our proof id to the list or the recently announced
5219 // proof INVs to this peer. This is used for filtering which
5220 // INV can be requested for download.
5221 peer->m_proof_relay->m_recently_announced_proofs.insert(
5222 localProof->getId());
5223 }
5224 }
5225 }
5226
5227 if (auto tx_relay = peer->GetTxRelay()) {
5228 // `TxRelay::m_tx_inventory_to_send` must be empty before the
5229 // version handshake is completed as
5230 // `TxRelay::m_next_inv_send_time` is first initialised in
5231 // `SendMessages` after the verack is received. Any transactions
5232 // received during the version handshake would otherwise
5233 // immediately be advertised without random delay, potentially
5234 // leaking the time of arrival to a spy.
5235 Assume(WITH_LOCK(tx_relay->m_tx_inventory_mutex,
5236 return tx_relay->m_tx_inventory_to_send.empty() &&
5237 tx_relay->m_next_inv_send_time == 0s));
5238 }
5239
5240 pfrom.fSuccessfullyConnected = true;
5241 return;
5242 }
5243
5244 if (!pfrom.fSuccessfullyConnected) {
5245 // Must have a verack message before anything else
5246 Misbehaving(*peer, "non-verack message before version handshake");
5247 return;
5248 }
5249
5250 if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
5251 const auto ser_params{
5252 msg_type == NetMsgType::ADDRV2
5253 ?
5254 // Set V2 param so that the CNetAddr and CAddress unserialize
5255 // methods know that an address in v2 format is coming.
5256                 CAddress::V2_NETWORK
5257                 : CAddress::V1_NETWORK,
5258         };
5259
5260 std::vector<CAddress> vAddr;
5261
5262 vRecv >> WithParams(ser_params, vAddr);
5263
5264 if (!SetupAddressRelay(pfrom, *peer)) {
5265 LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n",
5266 msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
5267 return;
5268 }
5269
5270 if (vAddr.size() > m_opts.max_addr_to_send) {
5271 Misbehaving(*peer, strprintf("%s message size = %u", msg_type,
5272 vAddr.size()));
5273 return;
5274 }
5275
5276 // Store the new addresses
5277 std::vector<CAddress> vAddrOk;
5278 const auto current_a_time{Now<NodeSeconds>()};
5279
5280 // Update/increment addr rate limiting bucket.
5281 const auto current_time = GetTime<std::chrono::microseconds>();
5282 {
5283 LOCK(peer->m_addr_token_bucket_mutex);
5284 if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
5285 // Don't increment bucket if it's already full
5286 const auto time_diff =
5287 std::max(current_time - peer->m_addr_token_timestamp, 0us);
5288 const double increment =
5289                     Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
5290                 peer->m_addr_token_bucket =
5291 std::min<double>(peer->m_addr_token_bucket + increment,
5292                                      MAX_ADDR_PROCESSING_TOKEN_BUCKET);
5293             }
5294 }
5295 peer->m_addr_token_timestamp = current_time;
5296
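        // The token bucket above implements addr rate limiting: tokens
        // accrue in proportion to elapsed time, capped at
        // MAX_ADDR_PROCESSING_TOKEN_BUCKET, and each address processed below
        // costs one token; peers with the Addr permission are exempt.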
5297 const bool rate_limited =
5298             !pfrom.HasPermission(NetPermissionFlags::Addr);
5299         uint64_t num_proc = 0;
5300 uint64_t num_rate_limit = 0;
5301 Shuffle(vAddr.begin(), vAddr.end(), m_rng);
5302 for (CAddress &addr : vAddr) {
5303 if (interruptMsgProc) {
5304 return;
5305 }
5306
5307 {
5308 LOCK(peer->m_addr_token_bucket_mutex);
5309 // Apply rate limiting.
5310 if (peer->m_addr_token_bucket < 1.0) {
5311 if (rate_limited) {
5312 ++num_rate_limit;
5313 continue;
5314 }
5315 } else {
5316 peer->m_addr_token_bucket -= 1.0;
5317 }
5318 }
5319
5320 // We only bother storing full nodes, though this may include things
5321 // which we would not make an outbound connection to, in part
5322 // because we may make feeler connections to them.
5323 if (!MayHaveUsefulAddressDB(addr.nServices) &&
5324                 !HasAllDesirableServiceFlags(addr.nServices)) {
5325                 continue;
5326 }
5327
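        // Addresses with implausible timestamps (before 1973, or more than
        // 10 minutes in the future) are clamped to five days old below, so
        // they are neither preferentially relayed nor stored as fresh.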
5328 if (addr.nTime <= NodeSeconds{100000000s} ||
5329 addr.nTime > current_a_time + 10min) {
5330 addr.nTime = current_a_time - 5 * 24h;
5331 }
5332 AddAddressKnown(*peer, addr);
5333 if (m_banman &&
5334 (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
5335 // Do not process banned/discouraged addresses beyond
5336 // remembering we received them
5337 continue;
5338 }
5339 ++num_proc;
5340 bool fReachable = IsReachable(addr);
5341 if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
5342 vAddr.size() <= 10 && addr.IsRoutable()) {
5343 // Relay to a limited number of other nodes
5344 RelayAddress(pfrom.GetId(), addr, fReachable);
5345 }
5346 // Do not store addresses outside our network
5347 if (fReachable) {
5348 vAddrOk.push_back(addr);
5349 }
5350 }
5351 peer->m_addr_processed += num_proc;
5352 peer->m_addr_rate_limited += num_rate_limit;
5353         LogPrint(BCLog::NET,
5354                  "Received addr: %u addresses (%u processed, %u rate-limited) "
5355 "from peer=%d\n",
5356 vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
5357
5358 m_addrman.Add(vAddrOk, pfrom.addr, 2h);
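        // An addr message carries at most 1000 entries, so a smaller one is
        // taken as the end of any outstanding getaddr reply; m_getaddr_sent
        // is cleared below accordingly.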
5359 if (vAddr.size() < 1000) {
5360 peer->m_getaddr_sent = false;
5361 }
5362
5363 // AddrFetch: Require multiple addresses to avoid disconnecting on
5364 // self-announcements
5365 if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
5366         LogPrint(BCLog::NET,
5367                  "addrfetch connection completed peer=%d; disconnecting\n",
5368 pfrom.GetId());
5369 pfrom.fDisconnect = true;
5370 }
5371 return;
5372 }
5373
5374 if (msg_type == NetMsgType::SENDADDRV2) {
5375 peer->m_wants_addrv2 = true;
5376 return;
5377 }
5378
5379 if (msg_type == NetMsgType::SENDHEADERS) {
5380 peer->m_prefers_headers = true;
5381 return;
5382 }
5383
5384 if (msg_type == NetMsgType::SENDCMPCT) {
5385 bool sendcmpct_hb{false};
5386 uint64_t sendcmpct_version{0};
5387 vRecv >> sendcmpct_hb >> sendcmpct_version;
5388
5389 if (sendcmpct_version != CMPCTBLOCKS_VERSION) {
5390 return;
5391 }
5392
5393 LOCK(cs_main);
5394 CNodeState *nodestate = State(pfrom.GetId());
5395 nodestate->m_provides_cmpctblocks = true;
5396 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
5397 // save whether peer selects us as BIP152 high-bandwidth peer
5398 // (receiving sendcmpct(1) signals high-bandwidth,
5399 // sendcmpct(0) low-bandwidth)
5400 pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
5401 return;
5402 }
5403
5404 if (msg_type == NetMsgType::INV) {
5405 std::vector<CInv> vInv;
5406 vRecv >> vInv;
5407 if (vInv.size() > MAX_INV_SZ) {
5408 Misbehaving(*peer, strprintf("inv message size = %u", vInv.size()));
5409 return;
5410 }
5411
5412 const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
5413
5414 const auto current_time{GetTime<std::chrono::microseconds>()};
5415 std::optional<BlockHash> best_block;
5416
5417 auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
5418 LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(),
5419 fAlreadyHave ? "have" : "new", pfrom.GetId());
5420 };
5421
5422 for (CInv &inv : vInv) {
5423 if (interruptMsgProc) {
5424 return;
5425 }
5426
5427 if (inv.IsMsgStakeContender()) {
5428 // Ignore invs with stake contenders. This type is only used for
5429 // polling.
5430 continue;
5431 }
5432
5433 if (inv.IsMsgBlk()) {
5434 LOCK(cs_main);
5435 const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
5436 logInv(inv, fAlreadyHave);
5437
5438 BlockHash hash{inv.hash};
5439 UpdateBlockAvailability(pfrom.GetId(), hash);
5440 if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() &&
5441 !IsBlockRequested(hash)) {
5442 // Headers-first is the primary method of announcement on
5443 // the network. If a node fell back to sending blocks by
5444 // inv, it may be for a re-org, or because we haven't
5445 // completed initial headers sync. The final block hash
5446 // provided should be the highest, so send a getheaders and
5447 // then fetch the blocks we need to catch up.
5448 best_block = std::move(hash);
5449 }
5450
5451 continue;
5452 }
5453
5454 if (inv.IsMsgProof()) {
5455 if (!m_avalanche) {
5456 continue;
5457 }
5458 const avalanche::ProofId proofid(inv.hash);
5459 const bool fAlreadyHave = AlreadyHaveProof(proofid);
5460 logInv(inv, fAlreadyHave);
5461 AddKnownProof(*peer, proofid);
5462
5463 if (!fAlreadyHave && m_avalanche &&
5464 !m_chainman.IsInitialBlockDownload()) {
5465 const bool preferred = isPreferredDownloadPeer(pfrom);
5466
5467 LOCK(cs_proofrequest);
5468 AddProofAnnouncement(pfrom, proofid, current_time,
5469 preferred);
5470 }
5471 continue;
5472 }
5473
5474 if (inv.IsMsgTx()) {
5475 LOCK(cs_main);
5476 const TxId txid(inv.hash);
5477 const bool fAlreadyHave =
5478 AlreadyHaveTx(txid, /*include_reconsiderable=*/true);
5479 logInv(inv, fAlreadyHave);
5480
5481 AddKnownTx(*peer, txid);
5482 if (reject_tx_invs) {
5483                     LogPrint(BCLog::NET,
5484                              "transaction (%s) inv sent in violation of "
5485 "protocol, disconnecting peer=%d\n",
5486 txid.ToString(), pfrom.GetId());
5487 pfrom.fDisconnect = true;
5488 return;
5489 } else if (!fAlreadyHave &&
5490 !m_chainman.IsInitialBlockDownload()) {
5491 AddTxAnnouncement(pfrom, txid, current_time);
5492 }
5493
5494 continue;
5495 }
5496
5497             LogPrint(BCLog::NET,
5498                      "Unknown inv type \"%s\" received from peer=%d\n",
5499 inv.ToString(), pfrom.GetId());
5500 }
5501
5502 if (best_block) {
5503 // If we haven't started initial headers-sync with this peer, then
5504 // consider sending a getheaders now. On initial startup, there's a
5505 // reliability vs bandwidth tradeoff, where we are only trying to do
5506 // initial headers sync with one peer at a time, with a long
5507 // timeout (at which point, if the sync hasn't completed, we will
5508 // disconnect the peer and then choose another). In the meantime,
5509 // as new blocks are found, we are willing to add one new peer per
5510 // block to sync with as well, to sync quicker in the case where
5511 // our initial peer is unresponsive (but less bandwidth than we'd
5512 // use if we turned on sync with all peers).
5513 LOCK(::cs_main);
5514 CNodeState &state{*Assert(State(pfrom.GetId()))};
5515 if (state.fSyncStarted ||
5516 (!peer->m_inv_triggered_getheaders_before_sync &&
5517 *best_block != m_last_block_inv_triggering_headers_sync)) {
5518 if (MaybeSendGetHeaders(
5519 pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
5520 LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
5521 m_chainman.m_best_header->nHeight,
5522 best_block->ToString(), pfrom.GetId());
5523 }
5524 if (!state.fSyncStarted) {
5525 peer->m_inv_triggered_getheaders_before_sync = true;
5526 // Update the last block hash that triggered a new headers
5527 // sync, so that we don't turn on headers sync with more
5528 // than 1 new peer every new block.
5529 m_last_block_inv_triggering_headers_sync = *best_block;
5530 }
5531 }
5532 }
5533
5534 return;
5535 }
5536
5537 if (msg_type == NetMsgType::GETDATA) {
5538 std::vector<CInv> vInv;
5539 vRecv >> vInv;
5540 if (vInv.size() > MAX_INV_SZ) {
5541 Misbehaving(*peer,
5542 strprintf("getdata message size = %u", vInv.size()));
5543 return;
5544 }
5545
5546 LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
5547 vInv.size(), pfrom.GetId());
5548
5549 if (vInv.size() > 0) {
5550 LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
5551 vInv[0].ToString(), pfrom.GetId());
5552 }
5553
5554 {
5555 LOCK(peer->m_getdata_requests_mutex);
5556 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
5557 vInv.begin(), vInv.end());
5558 ProcessGetData(config, pfrom, *peer, interruptMsgProc);
5559 }
5560
5561 return;
5562 }
5563
5564 if (msg_type == NetMsgType::GETBLOCKS) {
5565 CBlockLocator locator;
5566 uint256 hashStop;
5567 vRecv >> locator >> hashStop;
5568
5569 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
5570             LogPrint(BCLog::NET,
5571                      "getblocks locator size %lld > %d, disconnect peer=%d\n",
5572 locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
5573 pfrom.fDisconnect = true;
5574 return;
5575 }
5576
5577 // We might have announced the currently-being-connected tip using a
5578 // compact block, which resulted in the peer sending a getblocks
5579 // request, which we would otherwise respond to without the new block.
5580 // To avoid this situation we simply verify that we are on our best
5581 // known chain now. This is super overkill, but we handle it better
5582 // for getheaders requests, and there are no known nodes which support
5583 // compact blocks but still use getblocks to request blocks.
5584 {
5585 std::shared_ptr<const CBlock> a_recent_block;
5586 {
5587 LOCK(m_most_recent_block_mutex);
5588 a_recent_block = m_most_recent_block;
5589 }
5590             BlockValidationState state;
5591             if (!m_chainman.ActiveChainstate().ActivateBestChain(
5592 state, a_recent_block, m_avalanche)) {
5593 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
5594 state.ToString());
5595 }
5596 }
5597
5598 LOCK(cs_main);
5599
5600 // Find the last block the caller has in the main chain
5601 const CBlockIndex *pindex =
5602 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
5603
5604 // Send the rest of the chain
5605 if (pindex) {
5606 pindex = m_chainman.ActiveChain().Next(pindex);
5607 }
5608 int nLimit = 500;
5609 LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
5610 (pindex ? pindex->nHeight : -1),
5611 hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
5612 pfrom.GetId());
5613 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5614 if (pindex->GetBlockHash() == hashStop) {
5615 LogPrint(BCLog::NET, " getblocks stopping at %d %s\n",
5616 pindex->nHeight, pindex->GetBlockHash().ToString());
5617 break;
5618 }
5619 // If pruning, don't inv blocks unless we have on disk and are
5620 // likely to still have for some reasonable time window (1 hour)
5621 // that block relay might require.
5622 const int nPrunedBlocksLikelyToHave =
5623                 MIN_BLOCKS_TO_KEEP -
5624                 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
5625 if (m_chainman.m_blockman.IsPruneMode() &&
5626 (!pindex->nStatus.hasData() ||
5627 pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight -
5628 nPrunedBlocksLikelyToHave)) {
5629 LogPrint(
5630 BCLog::NET,
5631 " getblocks stopping, pruned or too old block at %d %s\n",
5632 pindex->nHeight, pindex->GetBlockHash().ToString());
5633 break;
5634 }
5635 WITH_LOCK(
5636 peer->m_block_inv_mutex,
5637 peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
5638 if (--nLimit <= 0) {
5639 // When this block is requested, we'll send an inv that'll
5640 // trigger the peer to getblocks the next batch of inventory.
5641 LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n",
5642 pindex->nHeight, pindex->GetBlockHash().ToString());
5643 WITH_LOCK(peer->m_block_inv_mutex, {
5644 peer->m_continuation_block = pindex->GetBlockHash();
5645 });
5646 break;
5647 }
5648 }
5649 return;
5650 }
5651
5652 if (msg_type == NetMsgType::GETBLOCKTXN) {
5653         BlockTransactionsRequest req;
5654         vRecv >> req;
5655
5656 std::shared_ptr<const CBlock> recent_block;
5657 {
5658 LOCK(m_most_recent_block_mutex);
5659 if (m_most_recent_block_hash == req.blockhash) {
5660 recent_block = m_most_recent_block;
5661 }
5662 // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
5663 }
5664 if (recent_block) {
5665 SendBlockTransactions(pfrom, *peer, *recent_block, req);
5666 return;
5667 }
5668
5669 FlatFilePos block_pos{};
5670 {
5671 LOCK(cs_main);
5672
5673 const CBlockIndex *pindex =
5674 m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
5675 if (!pindex || !pindex->nStatus.hasData()) {
5676 LogPrint(
5677 BCLog::NET,
5678 "Peer %d sent us a getblocktxn for a block we don't have\n",
5679 pfrom.GetId());
5680 return;
5681 }
5682
5683 if (pindex->nHeight >=
5684 m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
5685 block_pos = pindex->GetBlockPos();
5686 }
5687 }
5688
5689 if (!block_pos.IsNull()) {
5690 CBlock block;
5691 const bool ret{
5692 m_chainman.m_blockman.ReadBlockFromDisk(block, block_pos)};
5693 // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get
5694 // pruned after we release cs_main above, so this read should never
5695 // fail.
5696 assert(ret);
5697
5698 SendBlockTransactions(pfrom, *peer, block, req);
5699 return;
5700 }
5701
5702 // If an older block is requested (should never happen in practice,
5703 // but can happen in tests) send a block response instead of a
5704 // blocktxn response. Sending a full block response instead of a
5705 // small blocktxn response is preferable in the case where a peer
5706 // might maliciously send lots of getblocktxn requests to trigger
5707 // expensive disk reads, because it will require the peer to
5708 // actually receive all the data read from disk over the network.
5709         LogPrint(BCLog::NET,
5710                  "Peer %d sent us a getblocktxn for a block > %i deep\n",
5711 pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
5712 CInv inv;
5713 inv.type = MSG_BLOCK;
5714 inv.hash = req.blockhash;
5715 WITH_LOCK(peer->m_getdata_requests_mutex,
5716 peer->m_getdata_requests.push_back(inv));
5717 // The message processing loop will go around again (without pausing)
5718 // and we'll respond then (without cs_main)
5719 return;
5720 }
5721
5722 if (msg_type == NetMsgType::GETHEADERS) {
5723 CBlockLocator locator;
5724 BlockHash hashStop;
5725 vRecv >> locator >> hashStop;
5726
5727 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
5728             LogPrint(BCLog::NET,
5729                      "getheaders locator size %lld > %d, disconnect peer=%d\n",
5730 locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
5731 pfrom.fDisconnect = true;
5732 return;
5733 }
5734
5735 if (m_chainman.m_blockman.LoadingBlocks()) {
5736 LogPrint(
5737 BCLog::NET,
5738 "Ignoring getheaders from peer=%d while importing/reindexing\n",
5739 pfrom.GetId());
5740 return;
5741 }
5742
5743 LOCK(cs_main);
5744
5745 // Note that if we were to be on a chain that forks from the
5746 // checkpointed chain, then serving those headers to a peer that has
5747 // seen the checkpointed chain would cause that peer to disconnect us.
5748 // Requiring that our chainwork exceed the minimum chainwork is a
5749 // protection against being fed a bogus chain when we started up for
5750 // the first time and getting partitioned off the honest network for
5751 // serving that chain to others.
5752 if (m_chainman.ActiveTip() == nullptr ||
5753 (m_chainman.ActiveTip()->nChainWork <
5754 m_chainman.MinimumChainWork() &&
5755              !pfrom.HasPermission(NetPermissionFlags::NoBan))) {
5756         LogPrint(BCLog::NET,
5757                  "Ignoring getheaders from peer=%d because active chain "
5758 "has too little work; sending empty response\n",
5759 pfrom.GetId());
5760 // Just respond with an empty headers message, to tell the peer to
5761 // go away but not treat us as unresponsive.
5762 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS,
5763 std::vector<CBlock>()));
5764 return;
5765 }
5766
5767 CNodeState *nodestate = State(pfrom.GetId());
5768 const CBlockIndex *pindex = nullptr;
5769 if (locator.IsNull()) {
5770 // If locator is null, return the hashStop block
5771 pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
5772 if (!pindex) {
5773 return;
5774 }
5775
5776 if (!BlockRequestAllowed(pindex)) {
5777                 LogPrint(BCLog::NET,
5778                          "%s: ignoring request from peer=%i for old block "
5779 "header that isn't in the main chain\n",
5780 __func__, pfrom.GetId());
5781 return;
5782 }
5783 } else {
5784 // Find the last block the caller has in the main chain
5785 pindex =
5786 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
5787 if (pindex) {
5788 pindex = m_chainman.ActiveChain().Next(pindex);
5789 }
5790 }
5791
5792 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
5793 // count at the end
5794 std::vector<CBlock> vHeaders;
5795 int nLimit = MAX_HEADERS_RESULTS;
5796 LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
5797 (pindex ? pindex->nHeight : -1),
5798 hashStop.IsNull() ? "end" : hashStop.ToString(),
5799 pfrom.GetId());
5800 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5801 vHeaders.push_back(pindex->GetBlockHeader());
5802 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
5803 break;
5804 }
5805 }
5806 // pindex can be nullptr either if we sent
5807 // m_chainman.ActiveChain().Tip() OR if our peer has
5808 // m_chainman.ActiveChain().Tip() (and thus we are sending an empty
5809 // headers message). In both cases it's safe to update
5810 // pindexBestHeaderSent to be our tip.
5811 //
5812 // It is important that we simply reset the BestHeaderSent value here,
5813 // and not max(BestHeaderSent, newHeaderSent). We might have announced
5814 // the currently-being-connected tip using a compact block, which
5815 // resulted in the peer sending a headers request, which we respond to
5816 // without the new block. By resetting the BestHeaderSent, we ensure we
5817 // will re-announce the new block via headers (or compact blocks again)
5818 // in the SendMessages logic.
5819 nodestate->pindexBestHeaderSent =
5820 pindex ? pindex : m_chainman.ActiveChain().Tip();
5821 m_connman.PushMessage(&pfrom,
5822 msgMaker.Make(NetMsgType::HEADERS, vHeaders));
5823 return;
5824 }
5825
5826 if (msg_type == NetMsgType::TX) {
5827 if (RejectIncomingTxs(pfrom)) {
5828             LogPrint(BCLog::NET,
5829                      "transaction sent in violation of protocol peer=%d\n",
5830 pfrom.GetId());
5831 pfrom.fDisconnect = true;
5832 return;
5833 }
5834
5835 // Stop processing the transaction early if we are still in IBD since we
5836 // don't have enough information to validate it yet. Sending unsolicited
5837 // transactions is not considered a protocol violation, so don't punish
5838 // the peer.
5839 if (m_chainman.IsInitialBlockDownload()) {
5840 return;
5841 }
5842
5843 CTransactionRef ptx;
5844 vRecv >> ptx;
5845 const CTransaction &tx = *ptx;
5846 const TxId &txid = tx.GetId();
5847 AddKnownTx(*peer, txid);
5848
5849 {
5850 LOCK(cs_main);
5851
5852 m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
5853
5854 if (AlreadyHaveTx(txid, /*include_reconsiderable=*/true)) {
5855                 if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) {
5856                     // Always relay transactions received from peers with
5857 // forcerelay permission, even if they were already in the
5858 // mempool, allowing the node to function as a gateway for
5859 // nodes hidden behind it.
5860 if (!m_mempool.exists(tx.GetId())) {
5861 LogPrintf(
5862 "Not relaying non-mempool transaction %s from "
5863 "forcerelay peer=%d\n",
5864 tx.GetId().ToString(), pfrom.GetId());
5865 } else {
5866 LogPrintf("Force relaying tx %s from peer=%d\n",
5867 tx.GetId().ToString(), pfrom.GetId());
5868 RelayTransaction(tx.GetId());
5869 }
5870 }
5871
5872 if (m_recent_rejects_package_reconsiderable.contains(txid)) {
5873 // When a transaction is already in
5874 // m_recent_rejects_package_reconsiderable, we shouldn't
5875 // submit it by itself again. However, look for a matching
5876 // child in the orphanage, as it is possible that they
5877 // succeed as a package.
5878 LogPrint(
5879                     BCLog::TXPACKAGES,
5880                     "found tx %s in reconsiderable rejects, looking for "
5881 "child in orphanage\n",
5882 txid.ToString());
5883 if (auto package_to_validate{
5884 Find1P1CPackage(ptx, pfrom.GetId())}) {
5885 const auto package_result{ProcessNewPackage(
5886 m_chainman.ActiveChainstate(), m_mempool,
5887 package_to_validate->m_txns,
5888 /*test_accept=*/false)};
5889                     LogPrint(BCLog::TXPACKAGES,
5890                              "package evaluation for %s: %s (%s)\n",
5891 package_to_validate->ToString(),
5892 package_result.m_state.IsValid()
5893 ? "package accepted"
5894 : "package rejected",
5895 package_result.m_state.ToString());
5896 ProcessPackageResult(package_to_validate.value(),
5897 package_result);
5898 }
5899 }
5900 // If a tx is detected by m_recent_rejects it is ignored.
5901 // Because we haven't submitted the tx to our mempool, we won't
5902 // have computed a DoS score for it or determined exactly why we
5903 // consider it invalid.
5904 //
5905 // This means we won't penalize any peer subsequently relaying a
5906 // DoSy tx (even if we penalized the first peer who gave it to
5907 // us) because we have to account for m_recent_rejects showing
5908 // false positives. In other words, we shouldn't penalize a peer
5909 // if we aren't *sure* they submitted a DoSy tx.
5910 //
5911 // Note that m_recent_rejects doesn't just record DoSy or
5912 // invalid transactions, but any tx not accepted by the mempool,
5913 // which may be due to node policy (vs. consensus). So we can't
5914 // blanket penalize a peer simply for relaying a tx that our
5915 // m_recent_rejects has caught, regardless of false positives.
5916 return;
5917 }
5918
5919 const MempoolAcceptResult result =
5920 m_chainman.ProcessTransaction(ptx);
5921 const TxValidationState &state = result.m_state;
5922
5923 if (result.m_result_type ==
5924 MempoolAcceptResult::ResultType::VALID) {
5925 ProcessValidTx(pfrom.GetId(), ptx);
5926 pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
5927 } else if (state.GetResult() ==
5928 TxValidationResult::TX_MISSING_INPUTS) {
5929 // It may be the case that the orphan's parents have all been
5930 // rejected.
5931 bool fRejectedParents = false;
5932
5933 // Deduplicate parent txids, so that we don't have to loop over
5934 // the same parent txid more than once down below.
5935 std::vector<TxId> unique_parents;
5936 unique_parents.reserve(tx.vin.size());
5937 for (const CTxIn &txin : tx.vin) {
5938 // We start with all parents, and then remove duplicates
5939 // below.
5940 unique_parents.push_back(txin.prevout.GetTxId());
5941 }
5942 std::sort(unique_parents.begin(), unique_parents.end());
5943 unique_parents.erase(
5944 std::unique(unique_parents.begin(), unique_parents.end()),
5945 unique_parents.end());
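// The three lines above are the standard sort/unique/erase idiom for
// in-place deduplication. A self-contained illustration (hypothetical
// values, not part of this handler):
//
//   std::vector<int> v{3, 1, 3, 2, 1};
//   std::sort(v.begin(), v.end());                      // 1 1 2 3 3
//   v.erase(std::unique(v.begin(), v.end()), v.end());  // 1 2 3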
5946
5947 // Distinguish between parents in m_recent_rejects and
5948 // m_recent_rejects_package_reconsiderable. We can tolerate
5949 // having up to 1 parent in
5950 // m_recent_rejects_package_reconsiderable since we submit 1p1c
5951 // packages. However, fail immediately if any are in
5952 // m_recent_rejects.
5953 std::optional<TxId> rejected_parent_reconsiderable;
5954 for (const TxId &parent_txid : unique_parents) {
5955 if (m_recent_rejects.contains(parent_txid)) {
5956 fRejectedParents = true;
5957 break;
5958 }
5959
5960 if (m_recent_rejects_package_reconsiderable.contains(
5961 parent_txid) &&
5962 !m_mempool.exists(parent_txid)) {
5963 // More than 1 parent in
5964 // m_recent_rejects_package_reconsiderable:
5965 // 1p1c will not be sufficient to accept this package,
5966 // so just give up here.
5967 if (rejected_parent_reconsiderable.has_value()) {
5968 fRejectedParents = true;
5969 break;
5970 }
5971 rejected_parent_reconsiderable = parent_txid;
5972 }
5973 }
5974 if (!fRejectedParents) {
5975 const auto current_time{
5976 GetTime<std::chrono::microseconds>()};
5977
5978 for (const TxId &parent_txid : unique_parents) {
5979 // FIXME: MSG_TX should use a TxHash, not a TxId.
5980 AddKnownTx(*peer, parent_txid);
5981 // Exclude m_recent_rejects_package_reconsiderable: the
5982 // missing parent may have been previously rejected for
5983 // being too low feerate. This orphan might CPFP it.
5984 if (!AlreadyHaveTx(parent_txid,
5985 /*include_reconsiderable=*/false)) {
5986 AddTxAnnouncement(pfrom, parent_txid, current_time);
5987 }
5988 }
5989
5990 // NO_THREAD_SAFETY_ANALYSIS because we can't annotate for
5991 // g_msgproc_mutex
5992 if (unsigned int nEvicted =
5993 m_mempool.withOrphanage(
5994 [&](TxOrphanage &orphanage)
5995 NO_THREAD_SAFETY_ANALYSIS {
5996 if (orphanage.AddTx(ptx,
5997 pfrom.GetId())) {
5998 AddToCompactExtraTransactions(ptx);
5999 }
6000 return orphanage.LimitTxs(
6001 m_opts.max_orphan_txs, m_rng);
6002 }) > 0) {
6003 LogPrint(BCLog::TXPACKAGES,
6004 "orphanage overflow, removed %u tx\n",
6005 nEvicted);
6006 }
6007
6008 // Once added to the orphan pool, a tx is considered
6009 // AlreadyHave, and we shouldn't request it anymore.
6010 m_txrequest.ForgetInvId(tx.GetId());
6011
6012 } else {
6013 LogPrint(BCLog::TXPACKAGES,
6014 "not keeping orphan with rejected parents %s\n",
6015 tx.GetId().ToString());
6016 // We will continue to reject this tx since it has rejected
6017 // parents so avoid re-requesting it from other peers.
6018 m_recent_rejects.insert(tx.GetId());
6019 m_txrequest.ForgetInvId(tx.GetId());
6020 }
6021 }
6022 if (state.IsInvalid()) {
6023 ProcessInvalidTx(pfrom.GetId(), ptx, state,
6024 /*maybe_add_extra_compact_tx=*/true);
6025 }
6026 // When a transaction fails for TX_PACKAGE_RECONSIDERABLE, look for
6027 // a matching child in the orphanage, as it is possible that they
6028 // succeed as a package.
6029 if (state.GetResult() ==
6030 TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
6031 LogPrint(
6032 BCLog::TXPACKAGES,
6033 "tx %s failed but reconsiderable, looking for child in "
6034 "orphanage\n",
6035 txid.ToString());
6036 if (auto package_to_validate{
6037 Find1P1CPackage(ptx, pfrom.GetId())}) {
6038 const auto package_result{ProcessNewPackage(
6039 m_chainman.ActiveChainstate(), m_mempool,
6040 package_to_validate->m_txns, /*test_accept=*/false)};
6041 LogPrint(BCLog::TXPACKAGES,
6042 "package evaluation for %s: %s (%s)\n",
6043 package_to_validate->ToString(),
6044 package_result.m_state.IsValid()
6045 ? "package accepted"
6046 : "package rejected",
6047 package_result.m_state.ToString());
6048 ProcessPackageResult(package_to_validate.value(),
6049 package_result);
6050 }
6051 }
6052
6053 if (state.GetResult() ==
6054 TxValidationResult::TX_CONFLICT) {
6055 // Once added to the conflicting pool, a tx is considered
6056 // AlreadyHave, and we shouldn't request it anymore.
6057 m_txrequest.ForgetInvId(tx.GetId());
6058
6059 unsigned int nEvicted{0};
6060 // NO_THREAD_SAFETY_ANALYSIS because of g_msgproc_mutex required
6061 // in the lambda for m_rng
6062 m_mempool.withConflicting(
6063 [&](TxConflicting &conflicting) NO_THREAD_SAFETY_ANALYSIS {
6064 conflicting.AddTx(ptx, pfrom.GetId());
6065 nEvicted = conflicting.LimitTxs(
6066 m_opts.max_conflicting_txs, m_rng);
6067 });
6068
6069 if (nEvicted > 0) {
6070 LogPrint(BCLog::TXPACKAGES,
6071 "conflicting pool overflow, removed %u tx\n",
6072 nEvicted);
6073 }
6074 }
6075 } // Release cs_main
6076
6077 return;
6078 }
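// Informal recap of the TX paths above: a transaction ends up in one of
// several containers depending on the validation outcome (names as used
// in this handler):
//
//   accepted (VALID)          -> mempool, relayed via ProcessValidTx
//   TX_MISSING_INPUTS         -> orphanage, parents requested (unless a
//                                parent was already rejected)
//   TX_PACKAGE_RECONSIDERABLE -> retried as a 1-parent-1-child package
//   conflicting               -> conflicting pool, settled by avalanche
//   otherwise invalid         -> m_recent_rejects, never re-requested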
6079
6080 if (msg_type == NetMsgType::CMPCTBLOCK) {
6081 // Ignore cmpctblock received while importing
6082 if (m_chainman.m_blockman.LoadingBlocks()) {
6083 LogPrint(BCLog::NET,
6084 "Unexpected cmpctblock message received from peer %d\n",
6085 pfrom.GetId());
6086 return;
6087 }
6088
6089 CBlockHeaderAndShortTxIDs cmpctblock;
6090 try {
6091 vRecv >> cmpctblock;
6092 } catch (std::ios_base::failure &e) {
6093 // This block has non-contiguous or overflowing indexes
6094 Misbehaving(*peer, "cmpctblock-bad-indexes");
6095 return;
6096 }
6097
6098 bool received_new_header = false;
6099 const auto blockhash = cmpctblock.header.GetHash();
6100
6101 {
6102 LOCK(cs_main);
6103
6104 const CBlockIndex *prev_block =
6105 m_chainman.m_blockman.LookupBlockIndex(
6106 cmpctblock.header.hashPrevBlock);
6107 if (!prev_block) {
6108 // Doesn't connect (or is genesis), instead of DoSing in
6109 // AcceptBlockHeader, request deeper headers
6110 if (!m_chainman.IsInitialBlockDownload()) {
6111 MaybeSendGetHeaders(
6112 pfrom, GetLocator(m_chainman.m_best_header), *peer);
6113 }
6114 return;
6115 }
6116 if (prev_block->nChainWork +
6117 CalculateHeadersWork({cmpctblock.header}) <
6118 GetAntiDoSWorkThreshold()) {
6119 // If we get a low-work header in a compact block, we can ignore
6120 // it.
6121 LogPrint(BCLog::NET,
6122 "Ignoring low-work compact block from peer %d\n",
6123 pfrom.GetId());
6124 return;
6125 }
6126
6127 if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
6128 received_new_header = true;
6129 }
6130 }
6131
6132 const CBlockIndex *pindex = nullptr;
6133 BlockValidationState state;
6134 if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header},
6135 /*min_pow_checked=*/true, state,
6136 &pindex)) {
6137 if (state.IsInvalid()) {
6138 MaybePunishNodeForBlock(pfrom.GetId(), state,
6139 /*via_compact_block*/ true,
6140 "invalid header via cmpctblock");
6141 return;
6142 }
6143 }
6144
6145 if (received_new_header) {
6146 LogInfo("Saw new cmpctblock header hash=%s peer=%d\n",
6147 blockhash.ToString(), pfrom.GetId());
6148 }
6149
6150 // When we succeed in decoding a block's txids from a cmpctblock
6151 // message we typically jump to the BLOCKTXN handling code, with a
6152 // dummy (empty) BLOCKTXN message, to re-use the logic there in
6153 // completing processing of the putative block (without cs_main).
6154 bool fProcessBLOCKTXN = false;
6155 CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
6156
6157 // If we end up treating this as a plain headers message, call that as
6158 // well
6159 // without cs_main.
6160 bool fRevertToHeaderProcessing = false;
6161
6162 // Keep a CBlock for "optimistic" compactblock reconstructions (see
6163 // below)
6164 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6165 bool fBlockReconstructed = false;
6166
6167 {
6168 LOCK(cs_main);
6169 // If AcceptBlockHeader returned true, it set pindex
6170 assert(pindex);
6171 UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
6172
6173 CNodeState *nodestate = State(pfrom.GetId());
6174
6175 // If this was a new header with more work than our tip, update the
6176 // peer's last block announcement time
6177 if (received_new_header &&
6178 pindex->nChainWork >
6179 m_chainman.ActiveChain().Tip()->nChainWork) {
6180 nodestate->m_last_block_announcement = GetTime();
6181 }
6182
6183 if (pindex->nStatus.hasData()) {
6184 // Nothing to do here
6185 return;
6186 }
6187
6188 auto range_flight =
6189 mapBlocksInFlight.equal_range(pindex->GetBlockHash());
6190 size_t already_in_flight =
6191 std::distance(range_flight.first, range_flight.second);
6192 bool requested_block_from_this_peer{false};
6193
6194 // Multimap ensures ordering of outstanding requests. It's either
6195 // empty or first in line.
6196 bool first_in_flight =
6197 already_in_flight == 0 ||
6198 (range_flight.first->second.first == pfrom.GetId());
6199
6200 while (range_flight.first != range_flight.second) {
6201 if (range_flight.first->second.first == pfrom.GetId()) {
6202 requested_block_from_this_peer = true;
6203 break;
6204 }
6205 range_flight.first++;
6206 }
6207
6208 if (pindex->nChainWork <=
6209 m_chainman.ActiveChain()
6210 .Tip()
6211 ->nChainWork || // We know something better
6212 pindex->nTx != 0) {
6213 // We had this block at some point, but pruned it
6214 if (requested_block_from_this_peer) {
6215 // We requested this block for some reason, but our mempool
6216 // will probably be useless so we just grab the block via
6217 // normal getdata.
6218 std::vector<CInv> vInv(1);
6219 vInv[0] = CInv(MSG_BLOCK, blockhash);
6220 m_connman.PushMessage(
6221 &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
6222 }
6223 return;
6224 }
6225
6226 // If we're not close to tip yet, give up and let parallel block
6227 // fetch work its magic.
6228 if (!already_in_flight && !CanDirectFetch()) {
6229 return;
6230 }
6231
6232 // We want to be a bit conservative just to be extra careful about
6233 // DoS possibilities in compact block processing...
6234 if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
6235 if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK &&
6236 nodestate->vBlocksInFlight.size() <
6237 MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
6238 requested_block_from_this_peer) {
6239 std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
6240 if (!BlockRequested(config, pfrom.GetId(), *pindex,
6241 &queuedBlockIt)) {
6242 if (!(*queuedBlockIt)->partialBlock) {
6243 (*queuedBlockIt)
6244 ->partialBlock.reset(
6245 new PartiallyDownloadedBlock(config,
6246 &m_mempool));
6247 } else {
6248 // The block was already in flight using compact
6249 // blocks from the same peer.
6250 LogPrint(BCLog::NET, "Peer sent us compact block "
6251 "we were already syncing!\n");
6252 return;
6253 }
6254 }
6255
6256 PartiallyDownloadedBlock &partialBlock =
6257 *(*queuedBlockIt)->partialBlock;
6258 ReadStatus status =
6259 partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
6260 if (status == READ_STATUS_INVALID) {
6261 // Reset in-flight state in case Misbehaving does not
6262 // result in a disconnect
6263 RemoveBlockRequest(pindex->GetBlockHash(),
6264 pfrom.GetId());
6265 Misbehaving(*peer, "invalid compact block");
6266 return;
6267 } else if (status == READ_STATUS_FAILED) {
6268 if (first_in_flight) {
6269 // Duplicate txindices, the block is now in-flight,
6270 // so just request it.
6271 std::vector<CInv> vInv(1);
6272 vInv[0] = CInv(MSG_BLOCK, blockhash);
6273 m_connman.PushMessage(
6274 &pfrom,
6275 msgMaker.Make(NetMsgType::GETDATA, vInv));
6276 } else {
6277 // Give up for this peer and wait for other peer(s)
6278 RemoveBlockRequest(pindex->GetBlockHash(),
6279 pfrom.GetId());
6280 }
6281 return;
6282 }
6283
6284 BlockTransactionsRequest req;
6285 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
6286 if (!partialBlock.IsTxAvailable(i)) {
6287 req.indices.push_back(i);
6288 }
6289 }
6290 if (req.indices.empty()) {
6291 // Dirty hack to jump to BLOCKTXN code (TODO: move
6292 // message handling into their own functions)
6293 BlockTransactions txn;
6294 txn.blockhash = blockhash;
6295 blockTxnMsg << txn;
6296 fProcessBLOCKTXN = true;
6297 } else if (first_in_flight) {
6298 // We will try to round-trip any compact blocks we get
6299 // on failure, as long as it's first...
6300 req.blockhash = pindex->GetBlockHash();
6301 m_connman.PushMessage(
6302 &pfrom,
6303 msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
6304 } else if (pfrom.m_bip152_highbandwidth_to &&
6305 (!pfrom.IsInboundConn() ||
6306 IsBlockRequestedFromOutbound(blockhash) ||
6307 already_in_flight <
6308 MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
6309 // ... or it's a hb relay peer and:
6310 // - peer is outbound, or
6311 // - we already have an outbound attempt in flight (so
6312 // we'll take what we can get), or
6313 // - it's not the final parallel download slot (which we
6314 // may reserve for first outbound)
6315 req.blockhash = pindex->GetBlockHash();
6316 m_connman.PushMessage(
6317 &pfrom,
6318 msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
6319 } else {
6320 // Give up for this peer and wait for other peer(s)
6321 RemoveBlockRequest(pindex->GetBlockHash(),
6322 pfrom.GetId());
6323 }
6324 } else {
6325 // This block is either already in flight from a different
6326 // peer, or this peer has too many blocks outstanding to
6327 // download from. Optimistically try to reconstruct anyway
6328 // since we might be able to without any round trips.
6329 PartiallyDownloadedBlock tempBlock(config, &m_mempool);
6330 ReadStatus status =
6331 tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
6332 if (status != READ_STATUS_OK) {
6333 // TODO: don't ignore failures
6334 return;
6335 }
6336 std::vector<CTransactionRef> dummy;
6337 status = tempBlock.FillBlock(*pblock, dummy);
6338 if (status == READ_STATUS_OK) {
6339 fBlockReconstructed = true;
6340 }
6341 }
6342 } else {
6343 if (requested_block_from_this_peer) {
6344 // We requested this block, but it's far in the future, so
6345 // our mempool will probably be useless - request the block
6346 // normally.
6347 std::vector<CInv> vInv(1);
6348 vInv[0] = CInv(MSG_BLOCK, blockhash);
6349 m_connman.PushMessage(
6350 &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
6351 return;
6352 } else {
6353 // If this was an announce-cmpctblock, we want the same
6354 // treatment as a header message.
6355 fRevertToHeaderProcessing = true;
6356 }
6357 }
6358 } // cs_main
6359
6360 if (fProcessBLOCKTXN) {
6361 return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
6362 blockTxnMsg, time_received, interruptMsgProc);
6363 }
6364
6365 if (fRevertToHeaderProcessing) {
6366 // Headers received from HB compact block peers are permitted to be
6367 // relayed before full validation (see BIP 152), so we don't want to
6368 // disconnect the peer if the header turns out to be for an invalid
6369 // block. Note that if a peer tries to build on an invalid chain,
6370 // that will be detected and the peer will be banned.
6371 return ProcessHeadersMessage(config, pfrom, *peer,
6372 {cmpctblock.header},
6373 /*via_compact_block=*/true);
6374 }
6375
6376 if (fBlockReconstructed) {
6377 // If we got here, we were able to optimistically reconstruct a
6378 // block that is in flight from some other peer.
6379 {
6380 LOCK(cs_main);
6381 mapBlockSource.emplace(pblock->GetHash(),
6382 std::make_pair(pfrom.GetId(), false));
6383 }
6384 // Setting force_processing to true means that we bypass some of
6385 // our anti-DoS protections in AcceptBlock, which filters
6386 // unrequested blocks that might be trying to waste our resources
6387 // (eg disk space). Because we only try to reconstruct blocks when
6388 // we're close to caught up (via the CanDirectFetch() requirement
6389 // above, combined with the behavior of not requesting blocks until
6390 // we have a chain with at least the minimum chain work), and we
6391 // ignore compact blocks with less work than our tip, it is safe to
6392 // treat reconstructed compact blocks as having been requested.
6393 ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
6394 /*min_pow_checked=*/true);
6395 // hold cs_main for CBlockIndex::IsValid()
6396 LOCK(cs_main);
6397 if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
6398 // Clear download state for this block, which is in process from
6399 // some other peer. We do this after calling ProcessNewBlock so
6400 // that a malleated cmpctblock announcement can't be used to
6401 // interfere with block relay.
6402 RemoveBlockRequest(pblock->GetHash(), std::nullopt);
6403 }
6404 }
6405 return;
6406 }
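// BIP 152 round trip in brief: a CMPCTBLOCK carries the header plus
// short tx ids, and anything the mempool cannot supply is requested back
// with GETBLOCKTXN and returned via BLOCKTXN. Schematic exchange
// (illustrative, payload details elided):
//
//   peer -> us  : CMPCTBLOCK {header, shortids, prefilled txs}
//   us          : PartiallyDownloadedBlock::InitData()
//   us   -> peer: GETBLOCKTXN {blockhash, missing indices}  (if any)
//   peer -> us  : BLOCKTXN {blockhash, missing txs}
//   us          : FillBlock() -> ProcessBlock()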
6407
6408 if (msg_type == NetMsgType::BLOCKTXN) {
6409 // Ignore blocktxn received while importing
6410 if (m_chainman.m_blockman.LoadingBlocks()) {
6411 LogPrint(BCLog::NET,
6412 "Unexpected blocktxn message received from peer %d\n",
6413 pfrom.GetId());
6414 return;
6415 }
6416
6417 BlockTransactions resp;
6418 vRecv >> resp;
6419
6420 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6421 bool fBlockRead = false;
6422 {
6423 LOCK(cs_main);
6424
6425 auto range_flight = mapBlocksInFlight.equal_range(resp.blockhash);
6426 size_t already_in_flight =
6427 std::distance(range_flight.first, range_flight.second);
6428 bool requested_block_from_this_peer{false};
6429
6430 // Multimap ensures ordering of outstanding requests. It's either
6431 // empty or first in line.
6432 bool first_in_flight =
6433 already_in_flight == 0 ||
6434 (range_flight.first->second.first == pfrom.GetId());
6435
6436 while (range_flight.first != range_flight.second) {
6437 auto [node_id, block_it] = range_flight.first->second;
6438 if (node_id == pfrom.GetId() && block_it->partialBlock) {
6439 requested_block_from_this_peer = true;
6440 break;
6441 }
6442 range_flight.first++;
6443 }
6444
6445 if (!requested_block_from_this_peer) {
6446 LogPrint(BCLog::NET,
6447 "Peer %d sent us block transactions for block "
6448 "we weren't expecting\n",
6449 pfrom.GetId());
6450 return;
6451 }
6452
6453 PartiallyDownloadedBlock &partialBlock =
6454 *range_flight.first->second.second->partialBlock;
6455 ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
6456 if (status == READ_STATUS_INVALID) {
6457 // Reset in-flight state in case Misbehaving does not
6458 // result in a disconnect.
6459 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6460 Misbehaving(
6461 *peer,
6462 "invalid compact block/non-matching block transactions");
6463 return;
6464 } else if (status == READ_STATUS_FAILED) {
6465 if (first_in_flight) {
6466 // Might have collided, fall back to getdata now :(
6467 std::vector<CInv> invs;
6468 invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
6469 m_connman.PushMessage(
6470 &pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
6471 } else {
6472 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6473 LogPrint(
6474 BCLog::NET,
6475 "Peer %d sent us a compact block but it failed to "
6476 "reconstruct, waiting on first download to complete\n",
6477 pfrom.GetId());
6478 return;
6479 }
6480 } else {
6481 // Block is either okay, or possibly we received
6482 // READ_STATUS_CHECKBLOCK_FAILED.
6483 // Note that CheckBlock can only fail for one of a few reasons:
6484 // 1. bad-proof-of-work (impossible here, because we've already
6485 // accepted the header)
6486 // 2. merkleroot doesn't match the transactions given (already
6487 // caught in FillBlock with READ_STATUS_FAILED, so
6488 // impossible here)
6489 // 3. the block is otherwise invalid (eg invalid coinbase,
6490 // block is too big, too many sigChecks, etc).
6491 // So if CheckBlock failed, #3 is the only possibility.
6492 // Under BIP 152, we don't DoS-ban unless proof of work is
6493 // invalid (we don't require all the stateless checks to have
6494 // been run). This is handled below, so just treat this as
6495 // though the block was successfully read, and rely on the
6496 // handling in ProcessNewBlock to ensure the block index is
6497 // updated, etc.
6498
6499 // it is now an empty pointer
6500 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6501 fBlockRead = true;
6502 // mapBlockSource is used for potentially punishing peers and
6503 // updating which peers send us compact blocks, so the race
6504 // between here and cs_main in ProcessNewBlock is fine.
6505 // BIP 152 permits peers to relay compact blocks after
6506 // validating the header only; we should not punish peers
6507 // if the block turns out to be invalid.
6508 mapBlockSource.emplace(resp.blockhash,
6509 std::make_pair(pfrom.GetId(), false));
6510 }
6511 } // Don't hold cs_main when we call into ProcessNewBlock
6512 if (fBlockRead) {
6513 // Since we requested this block (it was in mapBlocksInFlight),
6514 // force it to be processed, even if it would not be a candidate for
6515 // new tip (missing previous block, chain not long enough, etc)
6516 // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
6517 // disk-space attacks), but this should be safe due to the
6518 // protections in the compact block handler -- see related comment
6519 // in compact block optimistic reconstruction handling.
6520 ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
6521 /*min_pow_checked=*/true);
6522 }
6523 return;
6524 }
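// Summary of the ReadStatus outcomes handled above:
//   READ_STATUS_OK      -> block reconstructed, handed to ProcessBlock
//   READ_STATUS_INVALID -> provably bad (e.g. non-matching transactions),
//                          peer is penalized via Misbehaving
//   READ_STATUS_FAILED  -> likely a shortid collision, fall back to a
//                          full GETDATA if this peer was first in flight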
6525
6526 if (msg_type == NetMsgType::HEADERS) {
6527 // Ignore headers received while importing
6528 if (m_chainman.m_blockman.LoadingBlocks()) {
6529 LogPrint(BCLog::NET,
6530 "Unexpected headers message received from peer %d\n",
6531 pfrom.GetId());
6532 return;
6533 }
6534
6535 std::vector<CBlockHeader> headers;
6536
6537 // Bypass the normal CBlock deserialization, as we don't want to risk
6538 // deserializing 2000 full blocks.
6539 unsigned int nCount = ReadCompactSize(vRecv);
6540 if (nCount > MAX_HEADERS_RESULTS) {
6541 Misbehaving(*peer,
6542 strprintf("too-many-headers: headers message size = %u",
6543 nCount));
6544 return;
6545 }
6546 headers.resize(nCount);
6547 for (unsigned int n = 0; n < nCount; n++) {
6548 vRecv >> headers[n];
6549 // Ignore tx count; assume it is 0.
6550 ReadCompactSize(vRecv);
6551 }
6552
6553 ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
6554 /*via_compact_block=*/false);
6555
6556 // Check if the headers presync progress needs to be reported to
6557 // validation. This needs to be done without holding the
6558 // m_headers_presync_mutex lock.
6559 if (m_headers_presync_should_signal.exchange(false)) {
6560 HeadersPresyncStats stats;
6561 {
6562 LOCK(m_headers_presync_mutex);
6563 auto it =
6564 m_headers_presync_stats.find(m_headers_presync_bestpeer);
6565 if (it != m_headers_presync_stats.end()) {
6566 stats = it->second;
6567 }
6568 }
6569 if (stats.second) {
6570 m_chainman.ReportHeadersPresync(
6571 stats.first, stats.second->first, stats.second->second);
6572 }
6573 }
6574
6575 return;
6576 }
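// Wire-format note: every header in a HEADERS message is followed by a
// compact-size transaction count (always 0 for header-only relay), which
// is why the loop above does one extra ReadCompactSize() per header. A
// minimal stand-alone parse of the same layout, assuming a stream `s`:
//
//   unsigned int n = ReadCompactSize(s);  // number of headers
//   std::vector<CBlockHeader> hdrs(n);
//   for (auto &h : hdrs) {
//       s >> h;              // 80-byte header
//       ReadCompactSize(s);  // trailing tx count, expected to be 0
//   }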
6577
6578 if (msg_type == NetMsgType::BLOCK) {
6579 // Ignore block received while importing
6580 if (m_chainman.m_blockman.LoadingBlocks()) {
6581 LogPrint(BCLog::NET,
6582 "Unexpected block message received from peer %d\n",
6583 pfrom.GetId());
6584 return;
6585 }
6586
6587 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6588 vRecv >> *pblock;
6589
6590 LogPrint(BCLog::NET, "received block %s peer=%d\n",
6591 pblock->GetHash().ToString(), pfrom.GetId());
6592
6593 const CBlockIndex *prev_block{
6594 WITH_LOCK(m_chainman.GetMutex(),
6595 return m_chainman.m_blockman.LookupBlockIndex(
6596 pblock->hashPrevBlock))};
6597
6598 if (IsBlockMutated(/*block=*/*pblock)) {
6599 LogPrint(BCLog::NET,
6600 "Received mutated block from peer=%d\n", peer->m_id);
6601 Misbehaving(*peer, "mutated block");
6602 WITH_LOCK(cs_main,
6603 RemoveBlockRequest(pblock->GetHash(), peer->m_id));
6604 return;
6605 }
6606
6607 // Process all blocks from whitelisted peers, even if not requested,
6608 // unless we're still syncing with the network. Such an unrequested
6609 // block may still be processed, subject to the conditions in
6610 // AcceptBlock().
6611 bool forceProcessing = pfrom.HasPermission(NetPermissionFlags::NoBan) &&
6612 !m_chainman.IsInitialBlockDownload();
6613 const BlockHash hash = pblock->GetHash();
6614 bool min_pow_checked = false;
6615 {
6616 LOCK(cs_main);
6617 // Always process the block if we requested it, since we may
6618 // need it even when it's not a candidate for a new best tip.
6619 forceProcessing = IsBlockRequested(hash);
6620 RemoveBlockRequest(hash, pfrom.GetId());
6621 // mapBlockSource is only used for punishing peers and setting
6622 // which peers send us compact blocks, so the race between here and
6623 // cs_main in ProcessNewBlock is fine.
6624 mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
6625
6626 // Check work on this block against our anti-dos thresholds.
6627 if (prev_block &&
6628 prev_block->nChainWork +
6629 CalculateHeadersWork({pblock->GetBlockHeader()}) >=
6630 GetAntiDoSWorkThreshold()) {
6631 min_pow_checked = true;
6632 }
6633 }
6634 ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
6635 return;
6636 }
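// Note on min_pow_checked above: the block only counts as having passed
// the anti-DoS work check when the cumulative work through this header
// clears the threshold, i.e. (illustrative inequality):
//
//   prev_block->nChainWork + CalculateHeadersWork({header})
//       >= GetAntiDoSWorkThreshold()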
6637
6638 if (msg_type == NetMsgType::AVAHELLO) {
6639 if (!m_avalanche) {
6640 return;
6641 }
6642 {
6643 LOCK(pfrom.cs_avalanche_pubkey);
6644 if (pfrom.m_avalanche_pubkey.has_value()) {
6645 LogPrint(
6646 BCLog::AVALANCHE,
6647 "Ignoring avahello from peer %d: already in our node set\n",
6648 pfrom.GetId());
6649 return;
6650 }
6651
6652 avalanche::Delegation delegation;
6653 vRecv >> delegation;
6654
6655 // A delegation with an all zero limited id indicates that the peer
6656 // has no proof, so we're done.
6657 if (delegation.getLimitedProofId() != uint256::ZERO) {
6658 avalanche::DelegationState state;
6659 CPubKey pubkey;
6660 if (!delegation.verify(state, pubkey)) {
6661 Misbehaving(*peer, "invalid-delegation");
6662 return;
6663 }
6664 pfrom.m_avalanche_pubkey = std::move(pubkey);
6665
6666 HashWriter sighasher{};
6667 sighasher << delegation.getId();
6668 sighasher << pfrom.nRemoteHostNonce;
6669 sighasher << pfrom.GetLocalNonce();
6670 sighasher << pfrom.nRemoteExtraEntropy;
6671 sighasher << pfrom.GetLocalExtraEntropy();
6672
6673 SchnorrSig sig;
6674 vRecv >> sig;
6675 if (!(*pfrom.m_avalanche_pubkey)
6676 .VerifySchnorr(sighasher.GetHash(), sig)) {
6677 Misbehaving(*peer, "invalid-avahello-signature");
6678 return;
6679 }
6680
6681 // If we don't know this proof already, add it to the tracker so
6682 // it can be requested.
6683 const avalanche::ProofId proofid(delegation.getProofId());
6684 if (!AlreadyHaveProof(proofid)) {
6685 const bool preferred = isPreferredDownloadPeer(pfrom);
6686 LOCK(cs_proofrequest);
6687 AddProofAnnouncement(pfrom, proofid,
6688 GetTime<std::chrono::microseconds>(),
6689 preferred);
6690 }
6691
6692 // Don't check the return value. If it fails we probably don't
6693 // know about the proof yet.
6694 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
6695 return pm.addNode(pfrom.GetId(), proofid);
6696 });
6697 }
6698
6699 pfrom.m_avalanche_enabled = true;
6700 }
6701
6702 // Send getavaaddr and getavaproofs to our avalanche outbound or
6703 // manual connections
6704 if (!pfrom.IsInboundConn()) {
6705 m_connman.PushMessage(&pfrom,
6706 msgMaker.Make(NetMsgType::GETAVAADDR));
6707 WITH_LOCK(peer->m_addr_token_bucket_mutex,
6708 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
6709
6710 if (peer->m_proof_relay && !m_chainman.IsInitialBlockDownload()) {
6711 m_connman.PushMessage(&pfrom,
6712 msgMaker.Make(NetMsgType::GETAVAPROOFS));
6713 peer->m_proof_relay->compactproofs_requested = true;
6714 }
6715 }
6716
6717 return;
6718 }
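// The avahello signature verified above commits to the delegation and to
// both sides' handshake entropy, so it cannot be replayed on another
// connection. Schematically, the signed digest is:
//
//   hash = Hash(delegation_id || remote_nonce || local_nonce ||
//               remote_extra_entropy || local_extra_entropy)
//   valid iff VerifySchnorr(hash, sig) under the delegated pubkey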
6719
6720 if (msg_type == NetMsgType::AVAPOLL) {
6721 if (!m_avalanche) {
6722 return;
6723 }
6724 const auto now = Now<SteadyMilliseconds>();
6725
6726 const auto last_poll = pfrom.m_last_poll;
6727 pfrom.m_last_poll = now;
6728
6729 if (now <
6730 last_poll + std::chrono::milliseconds(m_opts.avalanche_cooldown)) {
6731 LogPrint(BCLog::AVALANCHE,
6732 "Ignoring repeated avapoll from peer %d: cooldown not "
6733 "elapsed\n",
6734 pfrom.GetId());
6735 return;
6736 }
6737
6738 const bool quorum_established = m_avalanche->isQuorumEstablished();
6739
6740 uint64_t round;
6741 Unserialize(vRecv, round);
6742
6743 unsigned int nCount = ReadCompactSize(vRecv);
6744 if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
6745 Misbehaving(
6746 *peer,
6747 strprintf("too-many-ava-poll: poll message size = %u", nCount));
6748 return;
6749 }
6750
6751 std::vector<avalanche::Vote> votes;
6752 votes.reserve(nCount);
6753
6754 bool fPreconsensus{false};
6755 bool fStakingPreconsensus{false};
6756 {
6757 LOCK(::cs_main);
6758 const CBlockIndex *tip = m_chainman.ActiveTip();
6759 fPreconsensus = m_avalanche->isPreconsensusActivated(tip);
6760 fStakingPreconsensus =
6761 m_avalanche->isStakingPreconsensusActivated(tip);
6762 }
6763
6764 for (unsigned int n = 0; n < nCount; n++) {
6765 CInv inv;
6766 vRecv >> inv;
6767
6768 // Default vote for unknown inv type
6769 uint32_t vote = -1;
6770
6771 // We don't vote definitively until we have an established quorum
6772 if (!quorum_established) {
6773 votes.emplace_back(vote, inv.hash);
6774 continue;
6775 }
6776
6777 // If inv's type is known, get a vote for its hash
6778 switch (inv.type) {
6779 case MSG_TX: {
6780 if (fPreconsensus) {
6781 vote =
6782 GetAvalancheVoteForTx(*m_avalanche, TxId(inv.hash));
6783 }
6784 } break;
6785 case MSG_BLOCK: {
6786 vote = WITH_LOCK(cs_main, return GetAvalancheVoteForBlock(
6787 BlockHash(inv.hash)));
6788 } break;
6789 case MSG_AVA_PROOF: {
6790 vote = GetAvalancheVoteForProof(
6791 *m_avalanche, avalanche::ProofId(inv.hash));
6792 } break;
6793 case MSG_AVA_STAKE_CONTENDER: {
6794 if (fStakingPreconsensus) {
6795 vote = m_avalanche->getStakeContenderStatus(
6796 avalanche::StakeContenderId(inv.hash));
6797 }
6798 } break;
6799 default: {
6800 LogPrint(BCLog::AVALANCHE,
6801 "poll inv type %d unknown from peer=%d\n",
6802 inv.type, pfrom.GetId());
6803 }
6804 }
6805
6806 votes.emplace_back(vote, inv.hash);
6807 }
6808
6809 // Send the query to the node.
6810 m_avalanche->sendResponse(
6811 &pfrom, avalanche::Response(round, m_opts.avalanche_cooldown,
6812 std::move(votes)));
6813 return;
6814 }
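// An avapoll is answered vote for vote: each of the (at most
// AVALANCHE_MAX_ELEMENT_POLL) inventory items yields one avalanche::Vote,
// with -1 as the default for unknown item types. Schematic reply shape
// (illustrative):
//
//   AVARESPONSE {round, cooldown, votes: [{vote, inv.hash}, ...]}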
6815
6816 if (msg_type == NetMsgType::AVARESPONSE) {
6817 if (!m_avalanche) {
6818 return;
6819 }
6820 // As long as QUIC is not implemented, we need to sign responses and
6821 // verify their signatures in order to avoid any manipulation of
6822 // messages at the transport level.
6823 HashVerifier verifier(vRecv);
6824 avalanche::Response response;
6825 verifier >> response;
6826
6827 SchnorrSig sig;
6828 vRecv >> sig;
6829
6830 {
6831 LOCK(pfrom.cs_avalanche_pubkey);
6832 if (!pfrom.m_avalanche_pubkey.has_value() ||
6833 !(*pfrom.m_avalanche_pubkey)
6834 .VerifySchnorr(verifier.GetHash(), sig)) {
6835 Misbehaving(*peer, "invalid-ava-response-signature");
6836 return;
6837 }
6838 }
6839
6840 auto now = GetTime<std::chrono::seconds>();
6841
6842 std::vector<avalanche::VoteItemUpdate> updates;
6843 bool disconnect{false};
6844 std::string error;
6845 if (!m_avalanche->registerVotes(pfrom.GetId(), response, updates,
6846 disconnect, error)) {
6847 if (disconnect) {
6848 Misbehaving(*peer, error);
6849 return;
6850 }
6851
6852 // Otherwise the node may have had a network issue. Increase the
6853 // fault counter instead and only ban if we reached a threshold.
6854 // This allows for fault tolerance should there be a temporary
6855 // outage while still preventing DoS'ing behaviors, as the counter
6856 // is reset if no fault occurred over some time period.
6857 pfrom.m_avalanche_message_fault_counter++;
6858 pfrom.m_avalanche_last_message_fault = now;
6859
6860 // Allow up to 12 messages before increasing the ban score. Since
6861 // the queries are cleared after 10s, this is at least 2 minutes
6862 // of network outage tolerance over the 1h window.
6863 if (pfrom.m_avalanche_message_fault_counter > 12) {
6864 LogPrint(
6865 BCLog::AVALANCHE,
6866 "Repeated failure to register votes from peer %d: %s\n",
6867 pfrom.GetId(), error);
6868 pfrom.m_avalanche_message_fault_score += 1;
6869 if (pfrom.m_avalanche_message_fault_score > 100) {
6870 Misbehaving(*peer, error);
6871 }
6872 return;
6873 }
6874 }
6875
6876 // If no fault occurred within the last hour, reset the fault counter
6877 if (now > (pfrom.m_avalanche_last_message_fault.load() + 1h)) {
6878 pfrom.m_avalanche_message_fault_counter = 0;
6879 }
6880
6881 pfrom.invsVoted(response.GetVotes().size());
6882
6883 auto logVoteUpdate = [](const auto &voteUpdate,
6884 const std::string &voteItemTypeStr,
6885 const auto &voteItemId) {
6886 std::string voteOutcome;
6887 bool alwaysPrint = false;
6888 switch (voteUpdate.getStatus()) {
6889 case avalanche::VoteStatus::Invalid:
6890 voteOutcome = "invalidated";
6891 alwaysPrint = true;
6892 break;
6893 case avalanche::VoteStatus::Rejected:
6894 voteOutcome = "rejected";
6895 break;
6896 case avalanche::VoteStatus::Accepted:
6897 voteOutcome = "accepted";
6898 break;
6899 case avalanche::VoteStatus::Finalized:
6900 voteOutcome = "finalized";
6901 // Don't log tx finalization unconditionally as it can be
6902 // quite spammy.
6903 alwaysPrint = voteItemTypeStr != "tx";
6904 break;
6905 case avalanche::VoteStatus::Stalled:
6906 voteOutcome = "stalled";
6907 alwaysPrint = true;
6908 break;
6909
6910 // No default case, so the compiler can warn about missing
6911 // cases
6912 }
6913
6914 // Stake contenders are only ever logged to the avalanche category
6915 alwaysPrint &= (voteItemTypeStr != "contender");
6916
6917 if (alwaysPrint) {
6918 LogPrintf("Avalanche %s %s %s\n", voteOutcome, voteItemTypeStr,
6919 voteItemId.ToString());
6920 } else {
6921 // Only print these messages if -debug=avalanche is set
6922 LogPrint(BCLog::AVALANCHE, "Avalanche %s %s %s\n", voteOutcome,
6923 voteItemTypeStr, voteItemId.ToString());
6924 }
6925 };
6926
6927 bool shouldActivateBestChain = false;
6928
6929 bool fPreconsensus{false};
6930 bool fStakingPreconsensus{false};
6931 {
6932 LOCK(::cs_main);
6933 const CBlockIndex *tip = m_chainman.ActiveTip();
6934 fPreconsensus = m_avalanche->isPreconsensusActivated(tip);
6935 fStakingPreconsensus =
6936 m_avalanche->isStakingPreconsensusActivated(tip);
6937 }
6938
6939 for (const auto &u : updates) {
6940 const avalanche::AnyVoteItem &item = u.getVoteItem();
6941
6942 // Don't use a visitor here as we want to ignore unsupported item
6943 // types. This comes in handy when adding new types.
6944 if (auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
6945 avalanche::ProofRef proof = *pitem;
6946 const avalanche::ProofId &proofid = proof->getId();
6947
6948 logVoteUpdate(u, "proof", proofid);
6949
6950 auto rejectionMode =
6951 avalanche::PeerManager::RejectionMode::DEFAULT;
6952 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
6953 switch (u.getStatus()) {
6954 case avalanche::VoteStatus::Invalid:
6955 m_avalanche->withPeerManager(
6956 [&](avalanche::PeerManager &pm) {
6957 pm.setInvalid(proofid);
6958 });
6959 // Fallthrough
6960 case avalanche::VoteStatus::Stalled:
6961 // Invalidate mode removes the proof from all proof
6962 // pools
6963 rejectionMode =
6964 avalanche::PeerManager::RejectionMode::INVALIDATE;
6965 // Fallthrough
6966 case avalanche::VoteStatus::Rejected:
6967 if (!m_avalanche->withPeerManager(
6968 [&](avalanche::PeerManager &pm) {
6969 return pm.rejectProof(proofid,
6970 rejectionMode);
6971 })) {
6972 LogPrint(BCLog::AVALANCHE,
6973 "ERROR: Failed to reject proof: %s\n",
6974 proofid.GetHex());
6975 }
6976 break;
6977 case avalanche::VoteStatus::Finalized:
6978 m_avalanche->setRecentlyFinalized(proofid);
6979 nextCooldownTimePoint += std::chrono::seconds(
6980 m_opts.avalanche_peer_replacement_cooldown);
6981 case avalanche::VoteStatus::Accepted:
6982 if (!m_avalanche->withPeerManager(
6983 [&](avalanche::PeerManager &pm) {
6984 pm.registerProof(
6985 proof,
6986 avalanche::PeerManager::
6987 RegistrationMode::FORCE_ACCEPT);
6988 return pm.forPeer(
6989 proofid,
6990 [&](const avalanche::Peer &peer) {
6991 pm.updateNextPossibleConflictTime(
6992 peer.peerid,
6993 nextCooldownTimePoint);
6994 if (u.getStatus() ==
6995 avalanche::VoteStatus::
6996 Finalized) {
6997 pm.setFinalized(peer.peerid);
6998 }
6999 // Only fail if the peer was not
7000 // created
7001 return true;
7002 });
7003 })) {
7004 LogPrint(BCLog::AVALANCHE,
7005 "ERROR: Failed to accept proof: %s\n",
7006 proofid.GetHex());
7007 }
7008 break;
7009 }
7010 }
7011
7012 auto getBlockFromIndex = [this](const CBlockIndex *pindex) {
7013 // First check if the block is cached before reading
7014 // from disk.
7015 std::shared_ptr<const CBlock> pblock = WITH_LOCK(
7016 m_most_recent_block_mutex, return m_most_recent_block);
7017
7018 if (!pblock || pblock->GetHash() != pindex->GetBlockHash()) {
7019 std::shared_ptr<CBlock> pblockRead =
7020 std::make_shared<CBlock>();
7021 if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead,
7022 *pindex)) {
7023 assert(!"cannot load block from disk");
7024 }
7025 pblock = pblockRead;
7026 }
7027 return pblock;
7028 };
7029
7030 if (auto pitem = std::get_if<const CBlockIndex *>(&item)) {
7031 CBlockIndex *pindex = const_cast<CBlockIndex *>(*pitem);
7032
7033 shouldActivateBestChain = true;
7034
7035 logVoteUpdate(u, "block", pindex->GetBlockHash());
7036
7037 switch (u.getStatus()) {
7038 case avalanche::VoteStatus::Rejected: {
7039 BlockValidationState state;
7040 m_chainman.ActiveChainstate().ParkBlock(state, pindex);
7041 if (!state.IsValid()) {
7042 LogPrintf("ERROR: Database error: %s\n",
7043 state.GetRejectReason());
7044 return;
7045 }
7046 } break;
7047 case avalanche::VoteStatus::Invalid: {
7048 BlockValidationState state;
7049 m_chainman.ActiveChainstate().ParkBlock(state, pindex);
7050 if (!state.IsValid()) {
7051 LogPrintf("ERROR: Database error: %s\n",
7052 state.GetRejectReason());
7053 return;
7054 }
7055
7056 auto pblock = getBlockFromIndex(pindex);
7057 assert(pblock);
7058
7059 WITH_LOCK(cs_main, GetMainSignals().BlockInvalidated(
7060 pindex, pblock));
7061 } break;
7062 case avalanche::VoteStatus::Accepted: {
7063 LOCK(cs_main);
7064 m_chainman.ActiveChainstate().UnparkBlock(pindex);
7065 } break;
7066 case avalanche::VoteStatus::Finalized: {
7067 m_avalanche->setRecentlyFinalized(
7068 pindex->GetBlockHash());
7069
7070 m_avalanche->cleanupStakingRewards(pindex->nHeight);
7071
7072 std::unique_ptr<node::CBlockTemplate> blockTemplate;
7073 {
7074 LOCK(cs_main);
7075 auto &chainstate = m_chainman.ActiveChainstate();
7076 chainstate.UnparkBlock(pindex);
7077
7078 const bool newlyFinalized =
7079 !chainstate.IsBlockAvalancheFinalized(pindex) &&
7080 chainstate.AvalancheFinalizeBlock(pindex,
7081 *m_avalanche);
7082
7083 // Skip if the block is already finalized, aka an
7084 // ancestor of the finalized tip.
7085 if (fPreconsensus && newlyFinalized) {
7086 auto pblock = getBlockFromIndex(pindex);
7087 assert(pblock);
7088
7089 {
7090 // If the finalized block is not the tip, we
7091 // need to keep track of the transactions
7092 // from the non final blocks, so that we can
7093 // check if they were finalized by
7094 // pre-consensus. If these transactions were
7095 // pruned from the radix tree, their
7096 // finalization status could be lost in the
7097 // case the non final blocks are later
7098 // rejected.
7099 CBlockIndex *tip = m_chainman.ActiveTip();
7100 std::unordered_set<TxId, SaltedTxIdHasher>
7101 confirmedTxIdsInNonFinalizedBlocks;
7102 for (const CBlockIndex *block = tip;
7103 block != nullptr && block != pindex;
7104 block = block->pprev) {
7105 auto currentBlock =
7106 getBlockFromIndex(block);
7107 assert(currentBlock);
7108 for (const auto &tx :
7109 currentBlock->vtx) {
7110 confirmedTxIdsInNonFinalizedBlocks
7111 .insert(tx->GetId());
7112 }
7113 }
7114
7115 // Remove the transactions that are not
7116 // confirmed
7117 LOCK(m_mempool.cs);
7118 m_mempool.removeForFinalizedBlock(
7119 confirmedTxIdsInNonFinalizedBlocks);
7120
7121 // Now add mempool transactions to the poll.
7122 // To determine which transaction to add, we
7123 // leverage the legacy block template
7124 // construction method and build a template
7125 // with the most valuable txs in it. These
7126 // transactions are sorted topologically;
7127 // parents come before children, so we can
7128 // poll for children first and optimize the
7129 // number of polls.
7130 node::BlockAssembler blockAssembler(
7131 config, chainstate, &m_mempool,
7132 m_avalanche);
7133 blockAssembler.pblocktemplate.reset(
7134 new node::CBlockTemplate());
7135
7136 if (blockAssembler.pblocktemplate) {
7137 blockAssembler.addTxs(m_mempool);
7138 blockTemplate = std::move(
7139 blockAssembler.pblocktemplate);
7140 }
7141 }
7142 }
7143 } // release cs_main
7144
7145 if (blockTemplate) {
7146 // We could check if the tx is final already
7147 // but addToReconcile will skip the recently
7148 // finalized txs, so let's abuse this
7149 // feature and avoid a tree lookup for each
7150 // tx as an optimization.
7151 for (const auto &templateEntry :
7152 reverse_iterate(blockTemplate->entries)) {
7153 m_avalanche->addToReconcile(templateEntry.tx);
7154 }
7155 }
7156 } break;
7157 case avalanche::VoteStatus::Stalled:
7158 // Fall back on Nakamoto consensus in the absence of
7159 // Avalanche votes for other competing or descendant
7160 // blocks.
7161 break;
7162 }
7163 }
7164
7165 if (fStakingPreconsensus) {
7166 if (auto pitem =
7167 std::get_if<const avalanche::StakeContenderId>(&item)) {
7168 const avalanche::StakeContenderId contenderId = *pitem;
7169 logVoteUpdate(u, "contender", contenderId);
7170
7171 switch (u.getStatus()) {
7172 case avalanche::VoteStatus::Invalid:
7173 case avalanche::VoteStatus::Rejected: {
7174 m_avalanche->rejectStakeContender(contenderId);
7175 break;
7176 }
7177 case avalanche::VoteStatus::Finalized: {
7178 m_avalanche->setRecentlyFinalized(contenderId);
7179 m_avalanche->finalizeStakeContender(contenderId);
7180 break;
7181 }
7182 case avalanche::VoteStatus::Accepted: {
7183 m_avalanche->acceptStakeContender(contenderId);
7184 break;
7185 }
7186 case avalanche::VoteStatus::Stalled: {
7187 break;
7188 }
7189 }
7190 }
7191
7192 if (!fPreconsensus) {
7193 continue;
7194 }
7195
7196 if (auto pitem = std::get_if<const CTransactionRef>(&item)) {
7197 const CTransactionRef tx = *pitem;
7198 assert(tx != nullptr);
7199
7200 const TxId &txid = tx->GetId();
7201 const auto status{u.getStatus()};
7202
7203 if (status != avalanche::VoteStatus::Finalized) {
7204 // Because we also want to log the parent txs of this
7205 // finalized tx, we log the finalization later.
7206 logVoteUpdate(u, "tx", txid);
7207 }
7208
7209 switch (status) {
7210 case avalanche::VoteStatus::Invalid: // Fallthrough
7211 case avalanche::VoteStatus::Rejected: {
7212 // Remove from the mempool and the finalized tree, as
7213 // well as all the children txs. Note that removal from
7214 // the finalized tree is only a safety net and should
7215 // never happen.
7216 LOCK2(cs_main, m_mempool.cs);
7217 if (m_mempool.exists(txid)) {
7218 m_mempool.removeRecursive(
7219 *tx, MemPoolRemovalReason::AVALANCHE);
7220
7221 std::vector<CTransactionRef> conflictingTxs =
7222 m_mempool.withConflicting(
7223 [&tx](const TxConflicting &conflicting) {
7224 return conflicting.GetConflictTxs(tx);
7225 });
7226
7227 if (conflictingTxs.size() > 0) {
7228 // Pull the first tx only, erase the others so
7229 // they can be re-downloaded if needed.
7230 auto result = m_chainman.ProcessTransaction(
7231 conflictingTxs[0]);
7232 assert(result.m_state.IsValid());
7233 }
7234
7235 m_mempool.withConflicting(
7236 [&conflictingTxs,
7237 &tx](TxConflicting &conflicting) {
7238 for (const auto &conflictingTx :
7239 conflictingTxs) {
7240 conflicting.EraseTx(
7241 conflictingTx->GetId());
7242 }
7243
7244 // Note that we don't store the descendants,
7245 // which should be re-downloaded. This could
7246 // be optimized but we will have to manage
7247 // the topological ordering.
7248 conflicting.AddTx(tx, NO_NODE);
7249 });
7250 }
7251
7252 if (status == avalanche::VoteStatus::Invalid) {
7253 // Also remove from the conflicting pool. If it was
7254 // in the mempool (unlikely) we just moved it there.
7255 m_mempool.withConflicting(
7256 [&txid](TxConflicting &conflicting) {
7257 conflicting.EraseTx(txid);
7258 });
7259
7260 m_recent_rejects.insert(txid);
7261
7262 CCoinsViewMemPool coinViewMempool(
7263 &m_chainman.ActiveChainstate().CoinsTip(),
7264 m_mempool);
7265 CCoinsViewCache coinViewCache(&coinViewMempool);
7266 auto spentCoins =
7267 std::make_shared<const std::vector<Coin>>(
7268 GetSpentCoins(tx, coinViewCache));
7269
7270 GetMainSignals().TransactionInvalidated(tx,
7271 spentCoins);
7272 }
7273
7274 break;
7275 }
7276 case avalanche::VoteStatus::Accepted:
7277 // fallthrough
7278 case avalanche::VoteStatus::Finalized: {
7279 {
7280 LOCK2(cs_main, m_mempool.cs);
7281 if (m_mempool.withConflicting(
7282 [&txid](const TxConflicting &conflicting) {
7283 return conflicting.HaveTx(txid);
7284 })) {
7285 // Swap conflicting txs from/to the mempool
7286 std::vector<CTransactionRef>
7287 mempool_conflicting_txs;
7288 for (const auto &txin : tx->vin) {
7289 // Find the conflicting txs
7290 if (CTransactionRef conflict =
7291 m_mempool.GetConflictTx(
7292 txin.prevout)) {
7293 mempool_conflicting_txs.push_back(
7294 std::move(conflict));
7295 }
7296 }
7297 m_mempool.removeConflicts(*tx);
7298
7299 auto result = m_chainman.ProcessTransaction(tx);
7300 assert(result.m_state.IsValid());
7301
7302 m_mempool.withConflicting(
7303 [&txid, &mempool_conflicting_txs](
7304 TxConflicting &conflicting) {
7305 conflicting.EraseTx(txid);
7306 // Store the first tx only, the others
7307 // can be re-downloaded if needed.
7308 if (mempool_conflicting_txs.size() >
7309 0) {
7310 conflicting.AddTx(
7311 mempool_conflicting_txs[0],
7312 NO_NODE);
7313 }
7314 });
7315 }
7316 }
7317
7318 if (status == avalanche::VoteStatus::Finalized) {
7319 LOCK2(cs_main, m_mempool.cs);
7320 auto it = m_mempool.GetIter(txid);
7321 if (!it.has_value()) {
7322 LogPrint(
7323 BCLog::AVALANCHE,
7324 "Error: finalized tx (%s) is not in the "
7325 "mempool\n",
7326 txid.ToString());
7327 break;
7328 }
7329
7330 std::vector<TxId> finalizedTxIds;
7331 m_mempool.setAvalancheFinalized(
7332 **it, m_chainparams.GetConsensus(),
7333 *Assert(m_chainman.ActiveTip()),
7334 finalizedTxIds);
7335
7336 for (const auto &finalized_txid : finalizedTxIds) {
7337 m_avalanche->setRecentlyFinalized(
7338 finalized_txid);
7339 // Log the parent tx being implicitly finalized
7340 // as well
7341 logVoteUpdate(u, "tx", finalized_txid);
7342 }
7343
7344 // NO_THREAD_SAFETY_ANALYSIS because
7345 // m_recent_rejects requires cs_main in the lambda
7346 m_mempool.withConflicting(
7347 [&](TxConflicting &conflicting)
7348 NO_THREAD_SAFETY_ANALYSIS {
7349 std::vector<CTransactionRef>
7350 conflictingTxs =
7351 conflicting.GetConflictTxs(tx);
7352 for (const auto &conflictingTx :
7353 conflictingTxs) {
7354 m_recent_rejects.insert(
7355 conflictingTx->GetId());
7356 conflicting.EraseTx(
7357 conflictingTx->GetId());
7358 }
7359 });
7360 }
7361
7362 break;
7363 }
7364 case avalanche::VoteStatus::Stalled: {
7365 LOCK(cs_main);
7366
7367 // If the tx is stale, there is no point keeping it
7368 // around as it will not be mined. Let's remove it but
7369 // also forget we got it so it can eventually be
7370 // re-downloaded.
7371 {
7372 LOCK(m_mempool.cs);
7373 m_mempool.removeRecursive(
7374 *tx, MemPoolRemovalReason::AVALANCHE);
7375
7376 m_mempool.withConflicting(
7377 [&txid](TxConflicting &conflicting) {
7378 conflicting.EraseTx(txid);
7379 });
7380 }
7381
7382 // Make sure we can request this tx again
7383 m_txrequest.ForgetInvId(txid);
7384
7385 {
7386 // Save the stalled txids so that we can relay them
7387 // to our peers.
7388 LOCK(m_peer_mutex);
7389 for (auto &it : m_peer_map) {
7390 auto tx_relay = (*it.second).GetTxRelay();
7391 if (!tx_relay) {
7392 continue;
7393 }
7394
7395 LOCK(tx_relay->m_tx_inventory_mutex);
7396
7397 // We limit the size of the stalled txs set to
7398 // avoid unbounded memory growth. In practice,
7399 // this should not be an issue as stalled txs
7400 // should be few and far between. If we are at
7401 // the limit, remove the oldest entries.
7402 auto &stalled_by_time =
7403 tx_relay->m_avalanche_stalled_txids
7404 .get<by_time>();
7405 if (stalled_by_time.size() >=
7406 MAX_AVALANCHE_STALLED_TXIDS_PER_PEER) {
7407 stalled_by_time.erase(
7408 stalled_by_time.begin()->timeAdded);
7409 }
7410
7411 tx_relay->m_avalanche_stalled_txids.insert(
7412 {txid, now});
7413 }
7414 }
7415
7416 break;
7417 }
7418 }
7419 }
7420 }
7421
7422 if (shouldActivateBestChain) {
7423 BlockValidationState state;
7424 if (!m_chainman.ActiveChainstate().ActivateBestChain(
7425 state, /*pblock=*/nullptr, m_avalanche)) {
7426 LogPrintf("failed to activate chain (%s)\n", state.ToString());
7427 }
7428 }
7429
7430 return;
7431 }
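// Back-of-the-envelope for the fault tolerance above: queries expire
// after roughly 10s, so the 12 tolerated failures amount to about
// 12 * 10s = 2 minutes of unresponsiveness, and the counter is cleared
// again after a fault-free hour.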
7432
7433 if (msg_type == NetMsgType::AVAPROOF) {
7434 if (!m_avalanche) {
7435 return;
7436 }
7437 auto proof = RCUPtr<avalanche::Proof>::make();
7438 vRecv >> *proof;
7439
7440 ReceivedAvalancheProof(pfrom, *peer, proof);
7441
7442 return;
7443 }
7444
7445 if (msg_type == NetMsgType::GETAVAPROOFS) {
7446 if (!m_avalanche) {
7447 return;
7448 }
7449 if (peer->m_proof_relay == nullptr) {
7450 return;
7451 }
7452
7453 peer->m_proof_relay->lastSharedProofsUpdate =
7454 GetTime<std::chrono::seconds>();
7455
7456 peer->m_proof_relay->sharedProofs =
7457 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
7458 return pm.getShareableProofsSnapshot();
7459 });
7460
7461 avalanche::CompactProofs compactProofs(
7462 peer->m_proof_relay->sharedProofs);
7463 m_connman.PushMessage(
7464 &pfrom, msgMaker.Make(NetMsgType::AVAPROOFS, compactProofs));
7465
7466 return;
7467 }
7468
7469 if (msg_type == NetMsgType::AVAPROOFS) {
7470 if (!m_avalanche) {
7471 return;
7472 }
7473 if (peer->m_proof_relay == nullptr) {
7474 return;
7475 }
7476
7477 // Only process the compact proofs if we requested them
7478 if (!peer->m_proof_relay->compactproofs_requested) {
7479 LogPrint(BCLog::AVALANCHE, "Ignoring unsollicited avaproofs\n");
7480 return;
7481 }
7482 peer->m_proof_relay->compactproofs_requested = false;
7483
7484 avalanche::CompactProofs compactProofs;
7485 try {
7486 vRecv >> compactProofs;
7487 } catch (std::ios_base::failure &e) {
7488 // This compact proofs message has non-contiguous or overflowing indexes
7489 Misbehaving(*peer, "avaproofs-bad-indexes");
7490 return;
7491 }
7492
7493 // If there are prefilled proofs, process them first
7494 for (const auto &prefilledProof : compactProofs.getPrefilledProofs()) {
7495 if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
7496 // If we got an invalid proof, the peer is getting banned and we
7497 // can bail out.
7498 return;
7499 }
7500 }
7501
7502 // If there is no shortid, avoid parsing/responding/accounting for the
7503 // message.
7504 if (compactProofs.getShortIDs().size() == 0) {
7505 return;
7506 }
7507
7508 // To determine the chance that the number of entries in a bucket
7509 // exceeds N, we use the fact that the number of elements in a single
7510 // bucket is binomially distributed (with n = the number of shorttxids
7511 // S, and p = 1 / the number of buckets), that in the worst case the
7512 // number of buckets is equal to S (due to std::unordered_map having a
7513 // default load factor of 1.0), and that the chance for any bucket to
7514 // exceed N elements is at most buckets * (the chance that any given
7515 // bucket is above N elements). Thus:
7516 // P(max_elements_per_bucket > N) <=
7517 // S * (1 - cdf(binomial(n=S,p=1/S), N))
7518 // If we assume up to 21000000, allowing 15 elements per bucket should
7519 // only fail once per ~2.5 million avaproofs transfers (per peer and
7520 // connection).
7521 // TODO re-evaluate the bucket count to a more realistic value.
7522 // TODO: In the case of a shortid-collision, we should request all the
7523 // proofs which collided. For now, we only request one, which is not
7524 // that bad considering this event is expected to be very rare.
7525 auto shortIdProcessor =
7526 avalanche::ProofShortIdProcessor(compactProofs.getPrefilledProofs(),
7527 compactProofs.getShortIDs(), 15);
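// Worked instance of the bound described above: with S shortids hashed
// into S buckets (load factor 1.0), the occupancy of any one bucket is
// Binomial(n=S, p=1/S) with mean 1, so
//   P(max bucket > 15) <= S * (1 - cdf(Binomial(S, 1/S), 15))
// which for S up to 21,000,000 comes out to roughly one failure per
// ~2.5 million avaproofs transfers, as the comment above notes.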
7528
7529 if (shortIdProcessor.hasOutOfBoundIndex()) {
7530 // This should be caught by deserialization, but check it here as
7531 // well for good measure.
7532 Misbehaving(*peer, "avaproofs-bad-indexes");
7533 return;
7534 }
7535 if (!shortIdProcessor.isEvenlyDistributed()) {
7536 // This is suspicious, don't ban but bail out
7537 return;
7538 }
7539
7540 std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
7541 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
7542 pm.forEachPeer([&](const avalanche::Peer &peer) {
7543 assert(peer.proof);
7544 uint64_t shortid = compactProofs.getShortID(peer.getProofId());
7545
7546 int added =
7547 shortIdProcessor.matchKnownItem(shortid, peer.proof);
7548
7549 // No collision
7550 if (added >= 0) {
7551 // Because we know the proof, we can determine if our peer
7552 // has it (added = 1) or not (added = 0) and update the
7553 // remote proof status accordingly.
7554 remoteProofsStatus.emplace_back(peer.getProofId(),
7555 added > 0);
7556 }
7557
7558 // In order to properly determine which proof is missing, we
7559 // need to keep scanning for all our proofs.
7560 return true;
7561 });
7562 });
7563
7565 for (size_t i = 0; i < compactProofs.size(); i++) {
7566 if (shortIdProcessor.getItem(i) == nullptr) {
7567 req.indices.push_back(i);
7568 }
7569 }
7570
7571 m_connman.PushMessage(&pfrom,
7572 msgMaker.Make(NetMsgType::AVAPROOFSREQ, req));
7573
7574 const NodeId nodeid = pfrom.GetId();
7575
7576 // We want to keep a count of how many nodes we successfully requested
7577 // avaproofs from as this is used to determine when we are confident our
7578 // quorum is close enough to the other participants.
7579 m_avalanche->avaproofsSent(nodeid);
7580
7581 // Only save remote proofs from stakers
7582 if (WITH_LOCK(pfrom.cs_avalanche_pubkey,
7583 return pfrom.m_avalanche_pubkey.has_value())) {
7584 m_avalanche->withPeerManager(
7585 [&remoteProofsStatus, nodeid](avalanche::PeerManager &pm) {
7586 for (const auto &[proofid, present] : remoteProofsStatus) {
7587 pm.saveRemoteProof(proofid, nodeid, present);
7588 }
7589 });
7590 }
7591
7592 return;
7593 }
7594
7595 if (msg_type == NetMsgType::AVAPROOFSREQ) {
7596 if (peer->m_proof_relay == nullptr) {
7597 return;
7598 }
7599
7600 avalanche::ProofsRequest proofreq;
7601 vRecv >> proofreq;
7602
7603 auto requestedIndiceIt = proofreq.indices.begin();
7604 uint32_t treeIndice = 0;
7605 peer->m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
7606 if (requestedIndiceIt == proofreq.indices.end()) {
7607 // No more indices to process
7608 return false;
7609 }
7610
7611 if (treeIndice++ == *requestedIndiceIt) {
7612 m_connman.PushMessage(
7613 &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
7614 requestedIndiceIt++;
7615 }
7616
7617 return true;
7618 });
7619
7620 peer->m_proof_relay->sharedProofs = {};
7621 return;
7622 }
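// The walk above streams proofs back in tree order: proofreq.indices is
// expected to be sorted, so a single in-order traversal of the radix
// tree can satisfy every requested index exactly once. For example,
// with indices {0, 2} the first and third leaves are pushed as AVAPROOF
// messages and the second is skipped (illustrative).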
7623
7624 if (msg_type == NetMsgType::GETADDR) {
7625 // This asymmetric behavior for inbound and outbound connections was
7626 // introduced to prevent a fingerprinting attack: an attacker can send
7627 // specific fake addresses to users' AddrMan and later request them by
7628 // sending getaddr messages. Making nodes which are behind NAT and can
7629 // only make outgoing connections ignore the getaddr message mitigates
7630 // the attack.
7631 if (!pfrom.IsInboundConn()) {
7632 LogPrint(BCLog::NET,
7633 "Ignoring \"getaddr\" from %s connection. peer=%d\n",
7634 pfrom.ConnectionTypeAsString(), pfrom.GetId());
7635 return;
7636 }
7637
7638 // Since this must be an inbound connection, SetupAddressRelay will
7639 // never fail.
7640 Assume(SetupAddressRelay(pfrom, *peer));
7641
7642 // Only send one GetAddr response per connection to reduce resource
7643 // waste and discourage addr stamping of INV announcements.
7644 if (peer->m_getaddr_recvd) {
7645 LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
7646 pfrom.GetId());
7647 return;
7648 }
7649 peer->m_getaddr_recvd = true;
7650
7651 peer->m_addrs_to_send.clear();
7652 std::vector<CAddress> vAddr;
7653 const size_t maxAddrToSend = m_opts.max_addr_to_send;
7654 if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
7655 vAddr = m_connman.GetAddresses(maxAddrToSend, MAX_PCT_ADDR_TO_SEND,
7656 /* network */ std::nullopt);
7657 } else {
7658 vAddr = m_connman.GetAddresses(pfrom, maxAddrToSend,
7659 MAX_PCT_ADDR_TO_SEND);
7660 }
7661 for (const CAddress &addr : vAddr) {
7662 PushAddress(*peer, addr);
7663 }
7664 return;
7665 }
7666
7667 if (msg_type == NetMsgType::GETAVAADDR) {
7668 auto now = GetTime<std::chrono::seconds>();
7669 if (now < pfrom.m_nextGetAvaAddr) {
7670 // Prevent a peer from exhausting our resources by spamming
7671 // getavaaddr messages.
7672 return;
7673 }
7674
7675 // Only accept a getavaaddr every GETAVAADDR_INTERVAL at most
7676 pfrom.m_nextGetAvaAddr = now + GETAVAADDR_INTERVAL;
7677
7678 if (!SetupAddressRelay(pfrom, *peer)) {
7679 LogPrint(BCLog::AVALANCHE,
7680 "Ignoring getavaaddr message from %s peer=%d\n",
7681 pfrom.ConnectionTypeAsString(), pfrom.GetId());
7682 return;
7683 }
7684
7685 auto availabilityScoreComparator = [](const CNode *lhs,
7686 const CNode *rhs) {
7687 double scoreLhs = lhs->getAvailabilityScore();
7688 double scoreRhs = rhs->getAvailabilityScore();
7689
7690 if (scoreLhs != scoreRhs) {
7691 return scoreLhs > scoreRhs;
7692 }
7693
7694 return lhs < rhs;
7695 };
7696
7697 // Get up to MAX_ADDR_TO_SEND addresses of the nodes which are the
7698 // most active in the avalanche network. Account for 0 availability as
7699 // well so we can send addresses even if we did not start polling yet.
7700 std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
7701 availabilityScoreComparator);
7702 m_connman.ForEachNode([&](const CNode *pnode) {
7703 if (!pnode->m_avalanche_enabled ||
7704 pnode->getAvailabilityScore() < 0.) {
7705 return;
7706 }
7707
7708 avaNodes.insert(pnode);
7709 if (avaNodes.size() > m_opts.max_addr_to_send) {
7710 avaNodes.erase(std::prev(avaNodes.end()));
7711 }
7712 });
7713
7714 peer->m_addrs_to_send.clear();
7715 for (const CNode *pnode : avaNodes) {
7716 PushAddress(*peer, pnode->addr);
7717 }
7718
7719 return;
7720 }
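// The bounded std::set above keeps only the max_addr_to_send best-scored
// nodes: after each insert, the lowest-ranked element is erased once the
// limit is exceeded. The same idiom stand-alone (hypothetical values):
//
//   std::set<int, std::greater<int>> top;  // descending order
//   for (int x : {5, 1, 9, 3}) {
//       top.insert(x);
//       if (top.size() > 2) top.erase(std::prev(top.end()));
//   }
//   // top now holds {9, 5}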
7721
7722 if (msg_type == NetMsgType::MEMPOOL) {
7723 if (!(peer->m_our_services & NODE_BLOOM) &&
7724 !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
7725 if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
7726 LogPrint(BCLog::NET,
7727 "mempool request with bloom filters disabled, "
7728 "disconnect peer=%d\n",
7729 pfrom.GetId());
7730 pfrom.fDisconnect = true;
7731 }
7732 return;
7733 }
7734
7735 if (m_connman.OutboundTargetReached(false) &&
7736 !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
7737 if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
7738 LogPrint(BCLog::NET,
7739 "mempool request with bandwidth limit reached, "
7740 "disconnect peer=%d\n",
7741 pfrom.GetId());
7742 pfrom.fDisconnect = true;
7743 }
7744 return;
7745 }
7746
7747 if (auto tx_relay = peer->GetTxRelay()) {
7748 LOCK(tx_relay->m_tx_inventory_mutex);
7749 tx_relay->m_send_mempool = true;
7750 }
7751 return;
7752 }
7753
7754 if (msg_type == NetMsgType::PING) {
7755 if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
7756 uint64_t nonce = 0;
7757 vRecv >> nonce;
7758 // Echo the message back with the nonce. This allows for two useful
7759 // features:
7760 //
7761 // 1) A remote node can quickly check if the connection is
7762 // operational.
7763 // 2) Remote nodes can measure the latency of the network thread. If
7764 // this node is overloaded it won't respond to pings quickly and the
7765 // remote node can avoid sending us more work, like chain download
7766 // requests.
7767 //
7768 // The nonce stops the remote getting confused between different
7769 // pings: without it, if the remote node sends a ping once per
7770 // second and this node takes 5 seconds to respond to each, the 5th
7771 // ping the remote sends would appear to return very quickly.
7772 m_connman.PushMessage(&pfrom,
7773 msgMaker.Make(NetMsgType::PONG, nonce));
7774 }
7775 return;
7776 }
7777
7778 if (msg_type == NetMsgType::PONG) {
7779 const auto ping_end = time_received;
7780 uint64_t nonce = 0;
7781 size_t nAvail = vRecv.in_avail();
7782 bool bPingFinished = false;
7783 std::string sProblem;
7784
7785 if (nAvail >= sizeof(nonce)) {
7786 vRecv >> nonce;
7787
7788 // Only process pong message if there is an outstanding ping (old
7789 // ping without nonce should never pong)
7790 if (peer->m_ping_nonce_sent != 0) {
7791 if (nonce == peer->m_ping_nonce_sent) {
7792 // Matching pong received, this ping is no longer
7793 // outstanding
7794 bPingFinished = true;
7795 const auto ping_time = ping_end - peer->m_ping_start.load();
7796 if (ping_time.count() >= 0) {
7797 // Let connman know about this successful ping-pong
7798 pfrom.PongReceived(ping_time);
7799 } else {
7800 // This should never happen
7801 sProblem = "Timing mishap";
7802 }
7803 } else {
7804 // Nonce mismatches are normal when pings are overlapping
7805 sProblem = "Nonce mismatch";
7806 if (nonce == 0) {
7807 // This is most likely a bug in another implementation
7808 // somewhere; cancel this ping
7809 bPingFinished = true;
7810 sProblem = "Nonce zero";
7811 }
7812 }
7813 } else {
7814 sProblem = "Unsolicited pong without ping";
7815 }
7816 } else {
7817 // This is most likely a bug in another implementation somewhere;
7818 // cancel this ping
7819 bPingFinished = true;
7820 sProblem = "Short payload";
7821 }
7822
7823 if (!(sProblem.empty())) {
7824 LogPrint(BCLog::NET,
7825 "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
7826 pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
7827 nAvail);
7828 }
7829 if (bPingFinished) {
7830 peer->m_ping_nonce_sent = 0;
7831 }
7832 return;
7833 }
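
// A minimal sketch of the nonce-matched round-trip measurement implemented
// by the PING/PONG handlers above, using hypothetical local state instead
// of the real Peer members:
#if 0
#include <chrono>
#include <cstdint>
#include <optional>

struct PingState {
    uint64_t nonce_sent{0}; // 0 means no ping is outstanding
    std::chrono::microseconds sent_at{0};
};

// Returns the round-trip time if this pong answers the outstanding ping.
std::optional<std::chrono::microseconds>
onPong(PingState &state, uint64_t nonce, std::chrono::microseconds now) {
    if (state.nonce_sent == 0 || nonce != state.nonce_sent) {
        // Unsolicited, or an overlapping ping: keep waiting.
        return std::nullopt;
    }
    state.nonce_sent = 0; // the ping is no longer outstanding
    return now - state.sent_at;
}
#endif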
7834
7835 if (msg_type == NetMsgType::FILTERLOAD) {
7836 if (!(peer->m_our_services & NODE_BLOOM)) {
7837 LogPrint(BCLog::NET,
7838 "filterload received despite not offering bloom services "
7839 "from peer=%d; disconnecting\n",
7840 pfrom.GetId());
7841 pfrom.fDisconnect = true;
7842 return;
7843 }
7844 CBloomFilter filter;
7845 vRecv >> filter;
7846
7847 if (!filter.IsWithinSizeConstraints()) {
7848 // There is no excuse for sending a too-large filter
7849 Misbehaving(*peer, "too-large bloom filter");
7850 } else if (auto tx_relay = peer->GetTxRelay()) {
7851 {
7852 LOCK(tx_relay->m_bloom_filter_mutex);
7853 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
7854 tx_relay->m_relay_txs = true;
7855 }
7856 pfrom.m_bloom_filter_loaded = true;
7857 }
7858 return;
7859 }
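
// A sketch of the BIP37 sanity check behind IsWithinSizeConstraints(): a
// relayed bloom filter may use at most 36,000 bytes of filter data and at
// most 50 hash functions. The helper below is hypothetical.
#if 0
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t MAX_BLOOM_FILTER_SIZE = 36000; // bytes, per BIP37
constexpr uint32_t MAX_HASH_FUNCS = 50;         // per BIP37

bool withinBip37Limits(const std::vector<uint8_t> &data, uint32_t nHashFuncs) {
    return data.size() <= MAX_BLOOM_FILTER_SIZE && nHashFuncs <= MAX_HASH_FUNCS;
}
#endif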
7860
7861 if (msg_type == NetMsgType::FILTERADD) {
7862 if (!(peer->m_our_services & NODE_BLOOM)) {
7863 LogPrint(BCLog::NET,
7864 "filteradd received despite not offering bloom services "
7865 "from peer=%d; disconnecting\n",
7866 pfrom.GetId());
7867 pfrom.fDisconnect = true;
7868 return;
7869 }
7870 std::vector<uint8_t> vData;
7871 vRecv >> vData;
7872
7873 // Nodes must NEVER send a data item > 520 bytes (the max size for a
7874 // script data object, and thus, the maximum size any matched object can
7875 // have) in a filteradd message.
7876 bool bad = false;
7877 if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
7878 bad = true;
7879 } else if (auto tx_relay = peer->GetTxRelay()) {
7880 LOCK(tx_relay->m_bloom_filter_mutex);
7881 if (tx_relay->m_bloom_filter) {
7882 tx_relay->m_bloom_filter->insert(vData);
7883 } else {
7884 bad = true;
7885 }
7886 }
7887 if (bad) {
7888 // The structure of this code doesn't really allow for a good error
7889 // code. We'll go generic.
7890 Misbehaving(*peer, "bad filteradd message");
7891 }
7892 return;
7893 }
7894
7895 if (msg_type == NetMsgType::FILTERCLEAR) {
7896 if (!(peer->m_our_services & NODE_BLOOM)) {
7897 LogPrint(BCLog::NET,
7898 "filterclear received despite not offering bloom services "
7899 "from peer=%d; disconnecting\n",
7900 pfrom.GetId());
7901 pfrom.fDisconnect = true;
7902 return;
7903 }
7904 auto tx_relay = peer->GetTxRelay();
7905 if (!tx_relay) {
7906 return;
7907 }
7908
7909 {
7910 LOCK(tx_relay->m_bloom_filter_mutex);
7911 tx_relay->m_bloom_filter = nullptr;
7912 tx_relay->m_relay_txs = true;
7913 }
7914 pfrom.m_bloom_filter_loaded = false;
7915 pfrom.m_relays_txs = true;
7916 return;
7917 }
7918
7919 if (msg_type == NetMsgType::FEEFILTER) {
7920 Amount newFeeFilter = Amount::zero();
7921 vRecv >> newFeeFilter;
7922 if (MoneyRange(newFeeFilter)) {
7923 if (auto tx_relay = peer->GetTxRelay()) {
7924 tx_relay->m_fee_filter_received = newFeeFilter;
7925 }
7926 LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
7927 CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
7928 }
7929 return;
7930 }
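
// A sketch of the validation pattern in the FEEFILTER handler above: any
// amount read off the wire is range-checked before use, since a peer can
// send arbitrary bytes. The int64_t stand-ins below are hypothetical
// simplifications of Amount/MoneyRange.
#if 0
#include <cstdint>

bool moneyRangeSats(int64_t sats, int64_t max_money_sats) {
    return sats >= 0 && sats <= max_money_sats;
}

// Only act on a fee filter when the deserialized value is plausible;
// otherwise silently ignore it, as the handler above does.
void onFeeFilter(int64_t received, int64_t max_money, int64_t &fee_filter) {
    if (moneyRangeSats(received, max_money)) {
        fee_filter = received;
    }
}
#endif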
7931
7932 if (msg_type == NetMsgType::GETCFILTERS) {
7933 ProcessGetCFilters(pfrom, *peer, vRecv);
7934 return;
7935 }
7936
7937 if (msg_type == NetMsgType::GETCFHEADERS) {
7938 ProcessGetCFHeaders(pfrom, *peer, vRecv);
7939 return;
7940 }
7941
7942 if (msg_type == NetMsgType::GETCFCHECKPT) {
7943 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
7944 return;
7945 }
7946
7947 if (msg_type == NetMsgType::NOTFOUND) {
7948 std::vector<CInv> vInv;
7949 vRecv >> vInv;
7950 // A peer might send up to 1 notfound per getdata request, but no more
7951 if (vInv.size() <= PROOF_REQUEST_PARAMS.max_peer_announcements +
7952 TX_REQUEST_PARAMS.max_peer_announcements) {
7953
7954 for (CInv &inv : vInv) {
7955 if (inv.IsMsgTx()) {
7956 // If we receive a NOTFOUND message for a tx we requested,
7957 // mark the announcement for it as completed in
7958 // InvRequestTracker.
7959 LOCK(::cs_main);
7960 m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
7961 continue;
7962 }
7963 if (inv.IsMsgProof()) {
7964 if (!m_avalanche) {
7965 continue;
7966 }
7967 LOCK(cs_proofrequest);
7968 m_proofrequest.ReceivedResponse(
7969 pfrom.GetId(), avalanche::ProofId(inv.hash));
7970 }
7971 }
7972 }
7973 return;
7974 }
7975
7976 // Ignore unknown commands for extensibility
7977 LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
7978 SanitizeString(msg_type), pfrom.GetId());
7979 return;
7980}
7981
7982bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
7983 {
7984 LOCK(peer.m_misbehavior_mutex);
7985
7986 // There's nothing to do if the m_should_discourage flag isn't set
7987 if (!peer.m_should_discourage) {
7988 return false;
7989 }
7990
7991 peer.m_should_discourage = false;
7992 } // peer.m_misbehavior_mutex
7993
7994 if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
7995 // We never disconnect or discourage peers for bad behavior if they have
7996 // NetPermissionFlags::NoBan permission
7997 LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
7998 return false;
7999 }
8000
8001 if (pnode.IsManualConn()) {
8002 // We never disconnect or discourage manual peers for bad behavior
8003 LogPrintf("Warning: not punishing manually connected peer %d!\n",
8004 peer.m_id);
8005 return false;
8006 }
8007
8008 if (pnode.addr.IsLocal()) {
8009 // We disconnect local peers for bad behavior but don't discourage
8010 // (since that would discourage all peers on the same local address)
8011 LogPrint(BCLog::NET,
8012 "Warning: disconnecting but not discouraging %s peer %d!\n",
8013 pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
8014 pnode.fDisconnect = true;
8015 return true;
8016 }
8017
8018 // Normal case: Disconnect the peer and discourage all nodes sharing the
8019 // address
8020 LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n",
8021 peer.m_id);
8022 if (m_banman) {
8023 m_banman->Discourage(pnode.addr);
8024 }
8025 m_connman.DisconnectNode(pnode.addr);
8026 return true;
8027}
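
// The policy implemented above, restated as a pure decision function. The
// enum and boolean inputs are hypothetical simplifications of the real
// permission and connection checks:
#if 0
enum class Punishment {
    None,                    // noban or manual peer: never punished
    DisconnectOnly,          // local peer: drop it, but don't discourage
                             // (that would hit every peer on the address)
    DisconnectAndDiscourage, // normal case
};

Punishment classify(bool noban, bool manual, bool local) {
    if (noban || manual) {
        return Punishment::None;
    }
    if (local) {
        return Punishment::DisconnectOnly;
    }
    return Punishment::DisconnectAndDiscourage;
}
#endif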
8028
8029bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
8030 std::atomic<bool> &interruptMsgProc) {
8031 AssertLockHeld(g_msgproc_mutex);
8032
8033 //
8034 // Message format
8035 // (4) message start
8036 // (12) command
8037 // (4) size
8038 // (4) checksum
8039 // (x) data
8040 //
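
// A sketch of the wire layout described above: a fixed 24-byte header
// followed by the payload. The struct and field names are hypothetical;
// they only mirror the widths listed in the comment.
#if 0
struct SketchMessageHeader {
    uint8_t message_start[4]; // network magic, identifies the network
    char command[12];         // NUL-padded command name, e.g. "ping"
    uint32_t payload_size;    // little-endian payload length
    uint8_t checksum[4];      // leading bytes of a double-SHA256 of the payload
};
static_assert(sizeof(SketchMessageHeader) == 24, "header is 24 bytes");
#endif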
8041
8042 PeerRef peer = GetPeerRef(pfrom->GetId());
8043 if (peer == nullptr) {
8044 return false;
8045 }
8046
8047 {
8048 LOCK(peer->m_getdata_requests_mutex);
8049 if (!peer->m_getdata_requests.empty()) {
8050 ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
8051 }
8052 }
8053
8054 const bool processed_orphan = ProcessOrphanTx(config, *peer);
8055
8056 if (pfrom->fDisconnect) {
8057 return false;
8058 }
8059
8060 if (processed_orphan) {
8061 return true;
8062 }
8063
8064 // This maintains the order of responses and prevents m_getdata_requests
8065 // from growing unbounded
8066 {
8067 LOCK(peer->m_getdata_requests_mutex);
8068 if (!peer->m_getdata_requests.empty()) {
8069 return true;
8070 }
8071 }
8072
8073 // Don't bother if send buffer is too full to respond anyway
8074 if (pfrom->fPauseSend) {
8075 return false;
8076 }
8077
8078 auto poll_result{pfrom->PollMessage()};
8079 if (!poll_result) {
8080 // No message to process
8081 return false;
8082 }
8083
8084 CNetMessage &msg{poll_result->first};
8085 bool fMoreWork = poll_result->second;
8086
8087 TRACE6(net, inbound_message, pfrom->GetId(), pfrom->m_addr_name.c_str(),
8088 pfrom->ConnectionTypeAsString().c_str(), msg.m_type.c_str(),
8089 msg.m_recv.size(), msg.m_recv.data());
8090
8091 if (m_opts.capture_messages) {
8092 CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv),
8093 /*is_incoming=*/true);
8094 }
8095
8096 msg.SetVersion(pfrom->GetCommonVersion());
8097
8098 // Check network magic
8099 if (!msg.m_valid_netmagic) {
8100 LogPrint(BCLog::NET,
8101 "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
8102 SanitizeString(msg.m_type), pfrom->GetId());
8103
8104 // Make sure we discourage the address this came from for some time.
8105 if (m_banman) {
8106 m_banman->Discourage(pfrom->addr);
8107 }
8108 m_connman.DisconnectNode(pfrom->addr);
8109
8110 pfrom->fDisconnect = true;
8111 return false;
8112 }
8113
8114 // Check header
8115 if (!msg.m_valid_header) {
8116 LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
8117 SanitizeString(msg.m_type), pfrom->GetId());
8118 return fMoreWork;
8119 }
8120
8121 // Checksum
8122 CDataStream &vRecv = msg.m_recv;
8123 if (!msg.m_valid_checksum) {
8124 LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
8125 __func__, SanitizeString(msg.m_type), msg.m_message_size,
8126 pfrom->GetId());
8127 if (m_banman) {
8128 m_banman->Discourage(pfrom->addr);
8129 }
8130 m_connman.DisconnectNode(pfrom->addr);
8131 return fMoreWork;
8132 }
8133
8134 try {
8135 ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
8136 interruptMsgProc);
8137 if (interruptMsgProc) {
8138 return false;
8139 }
8140
8141 {
8142 LOCK(peer->m_getdata_requests_mutex);
8143 if (!peer->m_getdata_requests.empty()) {
8144 fMoreWork = true;
8145 }
8146 }
8147 // Does this peer have an orphan ready to reconsider?
8148 // (Note: we may have provided a parent for an orphan provided by
8149 // another peer that was already processed; in that case, the extra work
8150 // may not be noticed, possibly resulting in an unnecessary 100ms delay)
8151 if (m_mempool.withOrphanage([&peer](TxOrphanage &orphanage) {
8152 return orphanage.HaveTxToReconsider(peer->m_id);
8153 })) {
8154 fMoreWork = true;
8155 }
8156 } catch (const std::exception &e) {
8157 LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
8158 __func__, SanitizeString(msg.m_type), msg.m_message_size,
8159 e.what(), typeid(e).name());
8160 } catch (...) {
8161 LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
8162 __func__, SanitizeString(msg.m_type), msg.m_message_size);
8163 }
8164
8165 return fMoreWork;
8166}
8167
8168void PeerManagerImpl::ConsiderEviction(CNode &pto, Peer &peer,
8169 std::chrono::seconds time_in_seconds) {
8170 AssertLockHeld(cs_main);
8171
8172 CNodeState &state = *State(pto.GetId());
8173 const CNetMsgMaker msgMaker(pto.GetCommonVersion());
8174
8175 if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
8176 state.fSyncStarted) {
8177 // This is an outbound peer subject to disconnection if they don't
8178 // announce a block with as much work as the current tip within
8179 // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
8180 // chain has more work than ours, we should sync to it, unless it's
8181 // invalid, in which case we should find that out and disconnect from
8182 // them elsewhere).
8183 if (state.pindexBestKnownBlock != nullptr &&
8184 state.pindexBestKnownBlock->nChainWork >=
8185 m_chainman.ActiveChain().Tip()->nChainWork) {
8186 if (state.m_chain_sync.m_timeout != 0s) {
8187 state.m_chain_sync.m_timeout = 0s;
8188 state.m_chain_sync.m_work_header = nullptr;
8189 state.m_chain_sync.m_sent_getheaders = false;
8190 }
8191 } else if (state.m_chain_sync.m_timeout == 0s ||
8192 (state.m_chain_sync.m_work_header != nullptr &&
8193 state.pindexBestKnownBlock != nullptr &&
8194 state.pindexBestKnownBlock->nChainWork >=
8195 state.m_chain_sync.m_work_header->nChainWork)) {
8196 // Our best block known by this peer is behind our tip, and we're
8197 // either noticing that for the first time, OR this peer was able to
8198 // catch up to some earlier point where we checked against our tip.
8199 // Either way, set a new timeout based on current tip.
8200 state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
8201 state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
8202 state.m_chain_sync.m_sent_getheaders = false;
8203 } else if (state.m_chain_sync.m_timeout > 0s &&
8204 time_in_seconds > state.m_chain_sync.m_timeout) {
8205 // No evidence yet that our peer has synced to a chain with work
8206 // equal to that of our tip, when we first detected it was behind.
8207 // Send a single getheaders message to give the peer a chance to
8208 // update us.
8209 if (state.m_chain_sync.m_sent_getheaders) {
8210 // They've run out of time to catch up!
8211 LogPrintf(
8212 "Disconnecting outbound peer %d for old chain, best known "
8213 "block = %s\n",
8214 pto.GetId(),
8215 state.pindexBestKnownBlock != nullptr
8216 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
8217 : "<none>");
8218 pto.fDisconnect = true;
8219 } else {
8220 assert(state.m_chain_sync.m_work_header);
8221 // Here, we assume that the getheaders message goes out,
8222 // because it'll either go out or be skipped because of a
8223 // getheaders in-flight already, in which case the peer should
8224 // still respond to us with a sufficiently high work chain tip.
8225 MaybeSendGetHeaders(
8226 pto, GetLocator(state.m_chain_sync.m_work_header->pprev),
8227 peer);
8228 LogPrint(
8229 BCLog::NET,
8230 "sending getheaders to outbound peer=%d to verify chain "
8231 "work (current best known block:%s, benchmark blockhash: "
8232 "%s)\n",
8233 pto.GetId(),
8234 state.pindexBestKnownBlock != nullptr
8235 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
8236 : "<none>",
8237 state.m_chain_sync.m_work_header->GetBlockHash()
8238 .ToString());
8239 state.m_chain_sync.m_sent_getheaders = true;
8240 // Bump the timeout to allow a response, which could clear the
8241 // timeout (if the response shows the peer has synced), reset
8242 // the timeout (if the peer syncs to the required work but not
8243 // to our tip), or result in disconnect (if we advance to the
8244 // timeout and pindexBestKnownBlock has not sufficiently
8245 // progressed)
8246 state.m_chain_sync.m_timeout =
8247 time_in_seconds + HEADERS_RESPONSE_TIME;
8248 }
8249 }
8250 }
8251}
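
// The timeout bookkeeping above, condensed into a sketch of its main
// transitions. The ChainSyncSketch struct and step() helper are hypothetical
// simplifications (the durations would be CHAIN_SYNC_TIMEOUT and
// HEADERS_RESPONSE_TIME in the real code):
#if 0
#include <chrono>

struct ChainSyncSketch {
    std::chrono::seconds timeout{0}; // 0s means "no deadline armed"
    bool sent_getheaders{false};
};

// caught_up: the peer's best known block has at least our tip's work.
void step(ChainSyncSketch &s, bool caught_up, std::chrono::seconds now,
          std::chrono::seconds chain_sync_timeout,
          std::chrono::seconds headers_response_time, bool &disconnect) {
    if (caught_up) {
        s = ChainSyncSketch{}; // clear any pending deadline
    } else if (s.timeout == std::chrono::seconds{0}) {
        s.timeout = now + chain_sync_timeout; // arm the first deadline
    } else if (now > s.timeout) {
        if (s.sent_getheaders) {
            disconnect = true; // the last chance has expired
        } else {
            s.sent_getheaders = true; // send one getheaders, extend once
            s.timeout = now + headers_response_time;
        }
    }
}
#endif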
8252
8253void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
8254 // If we have any extra block-relay-only peers, disconnect the youngest
8255 // unless it's given us a block -- in which case, compare with the
8256 // second-youngest, and out of those two, disconnect the peer who least
8257 // recently gave us a block.
8258 // The youngest block-relay-only peer would be the extra peer we connected
8259 // to temporarily in order to sync our tip; see net.cpp.
8260 // Note that we use higher nodeid as a measure for most recent connection.
8261 if (m_connman.GetExtraBlockRelayCount() > 0) {
8262 std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
8263 next_youngest_peer{-1, 0};
8264
8265 m_connman.ForEachNode([&](CNode *pnode) {
8266 if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) {
8267 return;
8268 }
8269 if (pnode->GetId() > youngest_peer.first) {
8270 next_youngest_peer = youngest_peer;
8271 youngest_peer.first = pnode->GetId();
8272 youngest_peer.second = pnode->m_last_block_time;
8273 }
8274 });
8275
8276 NodeId to_disconnect = youngest_peer.first;
8277 if (youngest_peer.second > next_youngest_peer.second) {
8278 // Our newest block-relay-only peer gave us a block more recently;
8279 // disconnect our second youngest.
8280 to_disconnect = next_youngest_peer.first;
8281 }
8282
8283 m_connman.ForNode(
8284 to_disconnect,
8285 [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
8286 AssertLockHeld(::cs_main);
8287 // Make sure we're not getting a block right now, and that we've
8288 // been connected long enough for this eviction to happen at
8289 // all. Note that we only request blocks from a peer if we learn
8290 // of a valid headers chain with at least as much work as our
8291 // tip.
8292 CNodeState *node_state = State(pnode->GetId());
8293 if (node_state == nullptr ||
8294 (now - pnode->m_connected >= MINIMUM_CONNECT_TIME &&
8295 node_state->vBlocksInFlight.empty())) {
8296 pnode->fDisconnect = true;
8297 LogPrint(BCLog::NET,
8298 "disconnecting extra block-relay-only peer=%d "
8299 "(last block received at time %d)\n",
8300 pnode->GetId(),
8301 count_seconds(pnode->m_last_block_time));
8302 return true;
8303 } else {
8304 LogPrint(
8305 BCLog::NET,
8306 "keeping block-relay-only peer=%d chosen for eviction "
8307 "(connect time: %d, blocks_in_flight: %d)\n",
8308 pnode->GetId(), count_seconds(pnode->m_connected),
8309 node_state->vBlocksInFlight.size());
8310 }
8311 return false;
8312 });
8313 }
8314
8315 // Check whether we have too many OUTBOUND_FULL_RELAY peers
8316 if (m_connman.GetExtraFullOutboundCount() <= 0) {
8317 return;
8318 }
8319
8320 // If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
8321 // Pick the OUTBOUND_FULL_RELAY peer that least recently announced us a new
8322 // block, with ties broken by choosing the more recent connection (higher
8323 // node id)
8324 NodeId worst_peer = -1;
8325 int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
8326
8327 m_connman.ForEachNode([&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
8328 ::cs_main) {
8329 AssertLockHeld(::cs_main);
8330
8331 // Only consider OUTBOUND_FULL_RELAY peers that are not already marked
8332 // for disconnection
8333 if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) {
8334 return;
8335 }
8336 CNodeState *state = State(pnode->GetId());
8337 if (state == nullptr) {
8338 // shouldn't be possible, but just in case
8339 return;
8340 }
8341 // Don't evict our protected peers
8342 if (state->m_chain_sync.m_protect) {
8343 return;
8344 }
8345 if (state->m_last_block_announcement < oldest_block_announcement ||
8346 (state->m_last_block_announcement == oldest_block_announcement &&
8347 pnode->GetId() > worst_peer)) {
8348 worst_peer = pnode->GetId();
8349 oldest_block_announcement = state->m_last_block_announcement;
8350 }
8351 });
8352
8353 if (worst_peer == -1) {
8354 return;
8355 }
8356
8357 bool disconnected = m_connman.ForNode(
8358 worst_peer, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
8359 AssertLockHeld(::cs_main);
8360
8361 // Only disconnect a peer that has been connected to us for some
8362 // reasonable fraction of our check-frequency, to give it time for
8363 // new information to have arrived. Also don't disconnect any peer
8364 // we're trying to download a block from.
8365 CNodeState &state = *State(pnode->GetId());
8366 if (now - pnode->m_connected > MINIMUM_CONNECT_TIME &&
8367 state.vBlocksInFlight.empty()) {
8368 LogPrint(BCLog::NET,
8369 "disconnecting extra outbound peer=%d (last block "
8370 "announcement received at time %d)\n",
8371 pnode->GetId(), oldest_block_announcement);
8372 pnode->fDisconnect = true;
8373 return true;
8374 } else {
8375 LogPrint(BCLog::NET,
8376 "keeping outbound peer=%d chosen for eviction "
8377 "(connect time: %d, blocks_in_flight: %d)\n",
8378 pnode->GetId(), count_seconds(pnode->m_connected),
8379 state.vBlocksInFlight.size());
8380 return false;
8381 }
8382 });
8383
8384 if (disconnected) {
8385 // If we disconnected an extra peer, that means we successfully
8386 // connected to at least one peer after the last time we detected a
8387 // stale tip. Don't try any more extra peers until we next detect a
8388 // stale tip, to limit the load we put on the network from these extra
8389 // connections.
8390 m_connman.SetTryNewOutboundPeer(false);
8391 }
8392}
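
// The full-relay selection rule above as a standalone comparator sketch:
// evict the peer with the oldest last block announcement, breaking ties
// toward the higher node id (the more recent connection). The PeerInfoSketch
// type is a hypothetical stand-in for CNodeState:
#if 0
#include <cstdint>

struct PeerInfoSketch {
    int64_t node_id;
    int64_t last_block_announcement;
};

// True if `a` is the better eviction candidate than `b`.
bool evictBefore(const PeerInfoSketch &a, const PeerInfoSketch &b) {
    if (a.last_block_announcement != b.last_block_announcement) {
        return a.last_block_announcement < b.last_block_announcement;
    }
    return a.node_id > b.node_id;
}
#endif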
8393
8394void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
8395 LOCK(cs_main);
8396
8397 auto now{GetTime<std::chrono::seconds>()};
8398
8399 EvictExtraOutboundPeers(now);
8400
8401 if (now > m_stale_tip_check_time) {
8402 // Check whether our tip is stale, and if so, allow using an extra
8403 // outbound peer.
8404 if (!m_chainman.m_blockman.LoadingBlocks() &&
8405 m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() &&
8406 TipMayBeStale()) {
8407 LogPrintf("Potential stale tip detected, will try using extra "
8408 "outbound peer (last tip update: %d seconds ago)\n",
8409 count_seconds(now - m_last_tip_update.load()));
8410 m_connman.SetTryNewOutboundPeer(true);
8411 } else if (m_connman.GetTryNewOutboundPeer()) {
8412 m_connman.SetTryNewOutboundPeer(false);
8413 }
8414 m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
8415 }
8416
8417 if (!m_initial_sync_finished && CanDirectFetch()) {
8418 m_connman.StartExtraBlockRelayPeers();
8419 m_initial_sync_finished = true;
8420 }
8421}
8422
8423void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
8424 std::chrono::microseconds now) {
8425 if (m_connman.ShouldRunInactivityChecks(
8426 node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
8427 peer.m_ping_nonce_sent &&
8428 now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) {
8429 // The ping timeout is using mocktime. To disable the check during
8430 // testing, increase -peertimeout.
8431 LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n",
8432 0.000001 * count_microseconds(now - peer.m_ping_start.load()),
8433 peer.m_id);
8434 node_to.fDisconnect = true;
8435 return;
8436 }
8437
8438 const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
8439 bool pingSend = false;
8440
8441 if (peer.m_ping_queued) {
8442 // RPC ping request by user
8443 pingSend = true;
8444 }
8445
8446 if (peer.m_ping_nonce_sent == 0 &&
8447 now > peer.m_ping_start.load() + PING_INTERVAL) {
8448 // Ping automatically sent as a latency probe & keepalive.
8449 pingSend = true;
8450 }
8451
8452 if (pingSend) {
8453 uint64_t nonce;
8454 do {
8455 nonce = FastRandomContext().rand64();
8456 } while (nonce == 0);
8457 peer.m_ping_queued = false;
8458 peer.m_ping_start = now;
8459 if (node_to.GetCommonVersion() > BIP0031_VERSION) {
8460 peer.m_ping_nonce_sent = nonce;
8461 m_connman.PushMessage(&node_to,
8462 msgMaker.Make(NetMsgType::PING, nonce));
8463 } else {
8464 // Peer is too old to support ping command with nonce, pong will
8465 // never arrive.
8466 peer.m_ping_nonce_sent = 0;
8467 m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
8468 }
8469 }
8470}
8471
8472void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
8473 std::chrono::microseconds current_time) {
8474 // Nothing to do for non-address-relay peers
8475 if (!peer.m_addr_relay_enabled) {
8476 return;
8477 }
8478
8479 LOCK(peer.m_addr_send_times_mutex);
8480 if (fListen && !m_chainman.IsInitialBlockDownload() &&
8481 peer.m_next_local_addr_send < current_time) {
8482 // If we've sent before, clear the bloom filter for the peer, so
8483 // that our self-announcement will actually go out. This might
8484 // be unnecessary if the bloom filter has already rolled over
8485 // since our last self-announcement, but there is only a small
8486 // bandwidth cost that we can incur by doing this (which happens
8487 // once a day on average).
8488 if (peer.m_next_local_addr_send != 0us) {
8489 peer.m_addr_known->reset();
8490 }
8491 if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
8492 CAddress local_addr{*local_service, peer.m_our_services,
8493 Now<NodeSeconds>()};
8494 PushAddress(peer, local_addr);
8495 }
8496 peer.m_next_local_addr_send =
8497 current_time +
8498 m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
8499 }
8500
8501 // We sent an `addr` message to this peer recently. Nothing more to do.
8502 if (current_time <= peer.m_next_addr_send) {
8503 return;
8504 }
8505
8506 peer.m_next_addr_send =
8507 current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);
8508
8509 const size_t max_addr_to_send = m_opts.max_addr_to_send;
8510 if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
8511 // Should be impossible since we always check size before adding to
8512 // m_addrs_to_send. Recover by trimming the vector.
8513 peer.m_addrs_to_send.resize(max_addr_to_send);
8514 }
8515
8516 // Remove addr records that the peer already knows about, and add new
8517 // addrs to the m_addr_known filter on the same pass.
8518 auto addr_already_known =
8519 [&peer](const CAddress &addr)
8520 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
8521 bool ret = peer.m_addr_known->contains(addr.GetKey());
8522 if (!ret) {
8523 peer.m_addr_known->insert(addr.GetKey());
8524 }
8525 return ret;
8526 };
8527 peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
8528 peer.m_addrs_to_send.end(),
8529 addr_already_known),
8530 peer.m_addrs_to_send.end());
8531
8532 // No addr messages to send
8533 if (peer.m_addrs_to_send.empty()) {
8534 return;
8535 }
8536
8537 const char *msg_type;
8538 CNetAddr::Encoding ser_enc;
8539 if (peer.m_wants_addrv2) {
8540 msg_type = NetMsgType::ADDRV2;
8541 ser_enc = CNetAddr::Encoding::V2;
8542 } else {
8543 msg_type = NetMsgType::ADDR;
8544 ser_enc = CNetAddr::Encoding::V1;
8545 }
8546 m_connman.PushMessage(
8547 &node,
8548 CNetMsgMaker(node.GetCommonVersion())
8549 .Make(msg_type,
8550 WithParams(
8551 CAddress::SerParams{{ser_enc}, CAddress::Format::Network},
8552 peer.m_addrs_to_send)));
8553 peer.m_addrs_to_send.clear();
8554
8555 // we only send the big addr message once
8556 if (peer.m_addrs_to_send.capacity() > 40) {
8557 peer.m_addrs_to_send.shrink_to_fit();
8558 }
8559}
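
// A sketch of the randomized scheduling used above: drawing each delay
// from an exponential distribution with the configured mean makes the
// sends a Poisson process, which leaks less timing information than a
// fixed interval would. This is a standalone illustration, not this
// file's rand_exp_duration implementation:
#if 0
#include <chrono>
#include <cstdint>
#include <random>

std::chrono::microseconds nextSendDelay(std::mt19937_64 &rng,
                                        std::chrono::microseconds mean) {
    // exponential_distribution takes the rate, i.e. 1 / mean.
    std::exponential_distribution<double> dist(1.0 / mean.count());
    return std::chrono::microseconds{static_cast<int64_t>(dist(rng))};
}
// Usage: next_addr_send = current_time + nextSendDelay(rng, avg_interval);
#endif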
8560
8561void PeerManagerImpl::MaybeSendSendHeaders(CNode &node, Peer &peer) {
8562 // Delay sending SENDHEADERS (BIP 130) until we're done with an
8563 // initial-headers-sync with this peer. Receiving headers announcements for
8564 // new blocks while trying to sync their headers chain is problematic,
8565 // because of the state tracking done.
8566 if (!peer.m_sent_sendheaders &&
8567 node.GetCommonVersion() >= SENDHEADERS_VERSION) {
8568 LOCK(cs_main);
8569 CNodeState &state = *State(node.GetId());
8570 if (state.pindexBestKnownBlock != nullptr &&
8571 state.pindexBestKnownBlock->nChainWork >
8572 m_chainman.MinimumChainWork()) {
8573 // Tell our peer we prefer to receive headers rather than inv's
8574 // We send this to non-NODE NETWORK peers as well, because even
8575 // non-NODE NETWORK peers can announce blocks (such as pruning
8576 // nodes)
8577 m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion())
8578 .Make(NetMsgType::SENDHEADERS));
8579 peer.m_sent_sendheaders = true;
8580 }
8581 }
8582}
8583
8584void PeerManagerImpl::MaybeSendFeefilter(
8585 CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
8586 if (m_opts.ignore_incoming_txs) {
8587 return;
8588 }
8589 if (pto.GetCommonVersion() < FEEFILTER_VERSION) {
8590 return;
8591 }
8592 // peers with the forcerelay permission should not filter txs to us
8593 if (pto.HasPermission(NetPermissionFlags::ForceRelay)) {
8594 return;
8595 }
8596 // Don't send feefilter messages to outbound block-relay-only peers since
8597 // they should never announce transactions to us, regardless of feefilter
8598 // state.
8599 if (pto.IsBlockOnlyConn()) {
8600 return;
8601 }
8602
8603 Amount currentFilter = m_mempool.GetMinFee().GetFeePerK();
8604
8605 if (m_chainman.IsInitialBlockDownload()) {
8606 // Received tx-inv messages are discarded when the active
8607 // chainstate is in IBD, so tell the peer to not send them.
8608 currentFilter = MAX_MONEY;
8609 } else {
8610 static const Amount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
8611 if (peer.m_fee_filter_sent == MAX_FILTER) {
8612 // Send the current filter if we sent MAX_FILTER previously
8613 // and made it out of IBD.
8614 peer.m_next_send_feefilter = 0us;
8615 }
8616 }
8617 if (current_time > peer.m_next_send_feefilter) {
8618 Amount filterToSend = m_fee_filter_rounder.round(currentFilter);
8619 // We always have a fee filter of at least the min relay fee
8620 filterToSend =
8621 std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
8622 if (filterToSend != peer.m_fee_filter_sent) {
8623 m_connman.PushMessage(
8624 &pto, CNetMsgMaker(pto.GetCommonVersion())
8625 .Make(NetMsgType::FEEFILTER, filterToSend));
8626 peer.m_fee_filter_sent = filterToSend;
8627 }
8628 peer.m_next_send_feefilter =
8629 current_time +
8630 m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
8631 }
8632 // If the fee filter has changed substantially and it's still more than
8633 // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
8634 // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
8635 else if (current_time + MAX_FEEFILTER_CHANGE_DELAY <
8636 peer.m_next_send_feefilter &&
8637 (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
8638 currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
8639 peer.m_next_send_feefilter =
8640 current_time +
8641 FastRandomContext().randrange<std::chrono::microseconds>(
8642 MAX_FEEFILTER_CHANGE_DELAY);
8643 }
8644}
8645
8646namespace {
8647class CompareInvMempoolOrder {
8648 CTxMemPool *mp;
8649
8650public:
8651 explicit CompareInvMempoolOrder(CTxMemPool *_mempool) : mp(_mempool) {}
8652
8653 bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
8654 /**
8655 * As std::make_heap produces a max-heap, we want the entries which
8656 * are topologically earlier to sort later.
8657 */
8658 return mp->CompareTopologically(*b, *a);
8659 }
8660};
8661} // namespace
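
// A sketch of the heap pattern this comparator feeds (see the transaction
// relay loop in SendMessages below): std::make_heap is O(n) and each
// std::pop_heap is O(log n), so draining only the first k items costs
// O(n + k log n) instead of the O(n log n) of a full sort:
#if 0
#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<int> topK(std::vector<int> v, size_t k) {
    std::make_heap(v.begin(), v.end()); // max-heap in O(n)
    std::vector<int> out;
    while (!v.empty() && out.size() < k) {
        std::pop_heap(v.begin(), v.end()); // moves the max to the back
        out.push_back(v.back());
        v.pop_back();
    }
    return out;
}
#endif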
8662
8663bool PeerManagerImpl::RejectIncomingTxs(const CNode &peer) const {
8664 // block-relay-only peers may never send txs to us
8665 if (peer.IsBlockOnlyConn()) {
8666 return true;
8667 }
8668 if (peer.IsFeelerConn()) {
8669 return true;
8670 }
8671 // In -blocksonly mode, peers need the 'relay' permission to send txs to us
8672 if (m_opts.ignore_incoming_txs &&
8673 !peer.HasPermission(NetPermissionFlags::Relay)) {
8674 return true;
8675 }
8676 return false;
8677}
8678
8679bool PeerManagerImpl::SetupAddressRelay(const CNode &node, Peer &peer) {
8680 // We don't participate in addr relay with outbound block-relay-only
8681 // connections to prevent providing adversaries with the additional
8682 // information of addr traffic to infer the link.
8683 if (node.IsBlockOnlyConn()) {
8684 return false;
8685 }
8686
8687 if (!peer.m_addr_relay_enabled.exchange(true)) {
8688 // During version message processing (non-block-relay-only outbound
8689 // peers) or on first addr-related message we have received (inbound
8690 // peers), initialize m_addr_known.
8691 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
8692 }
8693
8694 return true;
8695}
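
// A usage sketch for the m_addr_known filter initialized above: a rolling
// bloom filter sized for 5000 entries at a 0.1% false-positive rate, used
// purely as a "probably already sent" dedup. A false positive only
// suppresses one redundant address; it never corrupts state. The
// shouldSendAddr helper is hypothetical:
#if 0
bool shouldSendAddr(CRollingBloomFilter &addr_known,
                    const std::vector<uint8_t> &addr_key) {
    if (addr_known.contains(addr_key)) {
        return false; // very likely sent to this peer already
    }
    addr_known.insert(addr_key);
    return true;
}
#endif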
8696
8697bool PeerManagerImpl::SendMessages(const Config &config, CNode *pto) {
8698 AssertLockHeld(g_msgproc_mutex);
8699
8700 PeerRef peer = GetPeerRef(pto->GetId());
8701 if (!peer) {
8702 return false;
8703 }
8704 const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
8705
8706 // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
8707 // disconnect misbehaving peers even before the version handshake is
8708 // complete.
8709 if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
8710 return true;
8711 }
8712
8713 // Don't send anything until the version handshake is complete
8714 if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
8715 return true;
8716 }
8717
8718 // If we get here, the outgoing message serialization version is set and
8719 // can't change.
8720 const CNetMsgMaker msgMaker(pto->GetCommonVersion());
8721
8722 const auto current_time{GetTime<std::chrono::microseconds>()};
8723
8724 if (pto->IsAddrFetchConn() &&
8725 current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
8726 LogPrint(BCLog::NET,
8727 "addrfetch connection timeout; disconnecting peer=%d\n",
8728 pto->GetId());
8729 pto->fDisconnect = true;
8730 return true;
8731 }
8732
8733 MaybeSendPing(*pto, *peer, current_time);
8734
8735 // MaybeSendPing may have marked peer for disconnection
8736 if (pto->fDisconnect) {
8737 return true;
8738 }
8739
8740 bool sync_blocks_and_headers_from_peer = false;
8741
8742 MaybeSendAddr(*pto, *peer, current_time);
8743
8744 MaybeSendSendHeaders(*pto, *peer);
8745
8746 {
8747 LOCK(cs_main);
8748
8749 CNodeState &state = *State(pto->GetId());
8750
8751 // Start block sync
8752 if (m_chainman.m_best_header == nullptr) {
8753 m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
8754 }
8755
8756 // Determine whether we might try initial headers sync or parallel
8757 // block download from this peer -- this mostly affects behavior while
8758 // in IBD (once out of IBD, we sync from all peers).
8759 if (state.fPreferredDownload) {
8760 sync_blocks_and_headers_from_peer = true;
8761 } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
8762 // Typically this is an inbound peer. If we don't have any outbound
8763 // peers, or if we aren't downloading any blocks from such peers,
8764 // then allow block downloads from this peer, too.
8765 // We prefer downloading blocks from outbound peers to avoid
8766 // putting undue load on (say) some home user who is just making
8767 // outbound connections to the network, but if our only source of
8768 // the latest blocks is from an inbound peer, we have to be sure to
8769 // eventually download it (and not just wait indefinitely for an
8770 // outbound peer to have it).
8771 if (m_num_preferred_download_peers == 0 ||
8772 mapBlocksInFlight.empty()) {
8773 sync_blocks_and_headers_from_peer = true;
8774 }
8775 }
8776
8777 if (!state.fSyncStarted && CanServeBlocks(*peer) &&
8778 !m_chainman.m_blockman.LoadingBlocks()) {
8779 // Only actively request headers from a single peer, unless we're
8780 // close to today.
8781 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
8782 m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
8783 const CBlockIndex *pindexStart = m_chainman.m_best_header;
8784 /**
8785 * If possible, start at the block preceding the currently best
8786 * known header. This ensures that we always get a non-empty list
8787 * of headers back as long as the peer is up to date. With a
8788 * non-empty response, we can initialise the peer's known best
8789 * block. This wouldn't be possible if we requested starting at
8790 * m_chainman.m_best_header and got back an empty response.
8791 */
8792 if (pindexStart->pprev) {
8793 pindexStart = pindexStart->pprev;
8794 }
8795 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
8796 LogPrint(
8797 BCLog::NET,
8798 "initial getheaders (%d) to peer=%d (startheight:%d)\n",
8799 pindexStart->nHeight, pto->GetId(),
8800 peer->m_starting_height);
8801
8802 state.fSyncStarted = true;
8803 peer->m_headers_sync_timeout =
8804 current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
8805 (
8806 // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to
8807 // microseconds before scaling to maintain precision
8808 std::chrono::microseconds{
8809 HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
8810 Ticks<std::chrono::seconds>(
8811 GetAdjustedTime() -
8812 m_chainman.m_best_header->Time()) /
8813 consensusParams.nPowTargetSpacing);
8814 nSyncStarted++;
8815 }
8816 }
8817 }
8818
8819 //
8820 // Try sending block announcements via headers
8821 //
8822 {
8823 // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
8824 // hashes we're relaying, and our peer wants headers announcements,
8825 // then find the first header not yet known to our peer but would
8826 // connect, and send. If no header would connect, or if we have too
8827 // many blocks, or if the peer doesn't want headers, just add all to
8828 // the inv queue.
8829 LOCK(peer->m_block_inv_mutex);
8830 std::vector<CBlock> vHeaders;
8831 bool fRevertToInv =
8832 ((!peer->m_prefers_headers &&
8833 (!state.m_requested_hb_cmpctblocks ||
8834 peer->m_blocks_for_headers_relay.size() > 1)) ||
8835 peer->m_blocks_for_headers_relay.size() >
8836 MAX_BLOCKS_TO_ANNOUNCE);
8837 // last header queued for delivery
8838 const CBlockIndex *pBestIndex = nullptr;
8839 // ensure pindexBestKnownBlock is up-to-date
8840 ProcessBlockAvailability(pto->GetId());
8841
8842 if (!fRevertToInv) {
8843 bool fFoundStartingHeader = false;
8844 // Try to find first header that our peer doesn't have, and then
8845 // send all headers past that one. If we come across any headers
8846 // that aren't on m_chainman.ActiveChain(), give up.
8847 for (const BlockHash &hash : peer->m_blocks_for_headers_relay) {
8848 const CBlockIndex *pindex =
8849 m_chainman.m_blockman.LookupBlockIndex(hash);
8850 assert(pindex);
8851 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
8852 // Bail out if we reorged away from this block
8853 fRevertToInv = true;
8854 break;
8855 }
8856 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
8857 // This means that the list of blocks to announce doesn't
8858 // connect to each other. This shouldn't really be
8859 // possible to hit during regular operation (because
8860 // reorgs should take us to a chain that has some block
8861 // not on the prior chain, which should be caught by the
8862 // prior check), but one way this could happen is by
8863 // using invalidateblock / reconsiderblock repeatedly on
8864 // the tip, causing it to be added multiple times to
8865 // m_blocks_for_headers_relay. Robustly deal with this
8866 // rare situation by reverting to an inv.
8867 fRevertToInv = true;
8868 break;
8869 }
8870 pBestIndex = pindex;
8871 if (fFoundStartingHeader) {
8872 // add this to the headers message
8873 vHeaders.push_back(pindex->GetBlockHeader());
8874 } else if (PeerHasHeader(&state, pindex)) {
8875 // Keep looking for the first new block.
8876 continue;
8877 } else if (pindex->pprev == nullptr ||
8878 PeerHasHeader(&state, pindex->pprev)) {
8879 // Peer doesn't have this header but they do have the
8880 // prior one. Start sending headers.
8881 fFoundStartingHeader = true;
8882 vHeaders.push_back(pindex->GetBlockHeader());
8883 } else {
8884 // Peer doesn't have this header or the prior one --
8885 // nothing will connect, so bail out.
8886 fRevertToInv = true;
8887 break;
8888 }
8889 }
8890 }
8891 if (!fRevertToInv && !vHeaders.empty()) {
8892 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
8893 // We only send up to 1 block as header-and-ids, as sending
8894 // more would probably mean we're doing an initial-ish sync
8895 // or they're slow.
8896 LogPrint(BCLog::NET,
8897 "%s sending header-and-ids %s to peer=%d\n",
8898 __func__, vHeaders.front().GetHash().ToString(),
8899 pto->GetId());
8900
8901 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
8902 {
8903 LOCK(m_most_recent_block_mutex);
8904 if (m_most_recent_block_hash ==
8905 pBestIndex->GetBlockHash()) {
8906 cached_cmpctblock_msg =
8907 msgMaker.Make(NetMsgType::CMPCTBLOCK,
8908 *m_most_recent_compact_block);
8909 }
8910 }
8911 if (cached_cmpctblock_msg.has_value()) {
8912 m_connman.PushMessage(
8913 pto, std::move(cached_cmpctblock_msg.value()));
8914 } else {
8915 CBlock block;
8916 const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(
8917 block, *pBestIndex)};
8918 assert(ret);
8919 CBlockHeaderAndShortTxIDs cmpctblock(block);
8920 m_connman.PushMessage(
8921 pto,
8922 msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
8923 }
8924 state.pindexBestHeaderSent = pBestIndex;
8925 } else if (peer->m_prefers_headers) {
8926 if (vHeaders.size() > 1) {
8927 LogPrint(BCLog::NET,
8928 "%s: %u headers, range (%s, %s), to peer=%d\n",
8929 __func__, vHeaders.size(),
8930 vHeaders.front().GetHash().ToString(),
8931 vHeaders.back().GetHash().ToString(),
8932 pto->GetId());
8933 } else {
8934 LogPrint(BCLog::NET,
8935 "%s: sending header %s to peer=%d\n", __func__,
8936 vHeaders.front().GetHash().ToString(),
8937 pto->GetId());
8938 }
8939 m_connman.PushMessage(
8940 pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
8941 state.pindexBestHeaderSent = pBestIndex;
8942 } else {
8943 fRevertToInv = true;
8944 }
8945 }
8946 if (fRevertToInv) {
8947 // If falling back to using an inv, just try to inv the tip. The
8948 // last entry in m_blocks_for_headers_relay was our tip at some
8949 // point in the past.
8950 if (!peer->m_blocks_for_headers_relay.empty()) {
8951 const BlockHash &hashToAnnounce =
8952 peer->m_blocks_for_headers_relay.back();
8953 const CBlockIndex *pindex =
8954 m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
8955 assert(pindex);
8956
8957 // Warn if we're announcing a block that is not on the main
8958 // chain. This should be very rare and could be optimized
8959 // out. Just log for now.
8960 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
8961 LogPrint(
8962 BCLog::NET,
8963 "Announcing block %s not on main chain (tip=%s)\n",
8964 hashToAnnounce.ToString(),
8965 m_chainman.ActiveChain()
8966 .Tip()
8967 ->GetBlockHash()
8968 .ToString());
8969 }
8970
8971 // If the peer's chain has this block, don't inv it back.
8972 if (!PeerHasHeader(&state, pindex)) {
8973 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
8974 LogPrint(BCLog::NET,
8975 "%s: sending inv peer=%d hash=%s\n", __func__,
8976 pto->GetId(), hashToAnnounce.ToString());
8977 }
8978 }
8979 }
8980 peer->m_blocks_for_headers_relay.clear();
8981 }
8982 } // release cs_main
8983
8984 //
8985 // Message: inventory
8986 //
8987 std::vector<CInv> vInv;
8988 auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
8989 vInv.emplace_back(type, hash);
8990 if (vInv.size() == MAX_INV_SZ) {
8991 m_connman.PushMessage(
8992 pto, msgMaker.Make(NetMsgType::INV, std::move(vInv)));
8993 vInv.clear();
8994 }
8995 };
8996
8997 {
8998 LOCK(cs_main);
8999
9000 {
9001 LOCK(peer->m_block_inv_mutex);
9002
9003 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
9004 INVENTORY_BROADCAST_MAX_PER_MB *
9005 config.GetMaxBlockSize() /
9006 1000000));
9007
9008 // Add blocks
9009 for (const BlockHash &hash : peer->m_blocks_for_inv_relay) {
9010 addInvAndMaybeFlush(MSG_BLOCK, hash);
9011 }
9012 peer->m_blocks_for_inv_relay.clear();
9013 }
9014
9015 auto computeNextInvSendTime =
9016 [&](std::chrono::microseconds &next)
9017 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) -> bool {
9018 bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
9019
9020 if (next < current_time) {
9021 fSendTrickle = true;
9022 if (pto->IsInboundConn()) {
9023 next = NextInvToInbounds(
9024 current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
9025 } else {
9026 // Skip delay for outbound peers, as there is less privacy
9027 // concern for them.
9028 next = current_time;
9029 }
9030 }
9031
9032 return fSendTrickle;
9033 };
9034
9035 // Add proofs to inventory
9036 if (peer->m_proof_relay != nullptr) {
9037 LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
9038
9039 if (computeNextInvSendTime(
9040 peer->m_proof_relay->m_next_inv_send_time)) {
9041 auto it =
9042 peer->m_proof_relay->m_proof_inventory_to_send.begin();
9043 while (it !=
9044 peer->m_proof_relay->m_proof_inventory_to_send.end()) {
9045 const avalanche::ProofId proofid = *it;
9046
9047 it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
9048 it);
9049
9050 if (peer->m_proof_relay->m_proof_inventory_known_filter
9051 .contains(proofid)) {
9052 continue;
9053 }
9054
9055 peer->m_proof_relay->m_proof_inventory_known_filter.insert(
9056 proofid);
9057 addInvAndMaybeFlush(MSG_AVA_PROOF, proofid);
9058 peer->m_proof_relay->m_recently_announced_proofs.insert(
9059 proofid);
9060 }
9061 }
9062 }
9063
9064 if (auto tx_relay = peer->GetTxRelay()) {
9065 LOCK(tx_relay->m_tx_inventory_mutex);
9066 // Check whether periodic sends should happen
9067 const bool fSendTrickle =
9068 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
9069
9070 // Time to send but the peer has requested we not relay
9071 // transactions.
9072 if (fSendTrickle) {
9073 LOCK(tx_relay->m_bloom_filter_mutex);
9074 if (!tx_relay->m_relay_txs) {
9075 tx_relay->m_tx_inventory_to_send.clear();
9076 }
9077 }
9078
9079 // Respond to BIP35 mempool requests
9080 if (fSendTrickle && tx_relay->m_send_mempool) {
9081 auto vtxinfo = m_mempool.infoAll();
9082 tx_relay->m_send_mempool = false;
9083 const CFeeRate filterrate{
9084 tx_relay->m_fee_filter_received.load()};
9085
9086 LOCK(tx_relay->m_bloom_filter_mutex);
9087
9088 for (const auto &txinfo : vtxinfo) {
9089 const TxId &txid = txinfo.tx->GetId();
9090 tx_relay->m_tx_inventory_to_send.erase(txid);
9091 // Don't send transactions that peers will not put into
9092 // their mempool
9093 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
9094 continue;
9095 }
9096 if (tx_relay->m_bloom_filter &&
9097 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
9098 *txinfo.tx)) {
9099 continue;
9100 }
9101 tx_relay->m_tx_inventory_known_filter.insert(txid);
9102 // Responses to MEMPOOL requests bypass the
9103 // m_recently_announced_invs filter.
9104 addInvAndMaybeFlush(MSG_TX, txid);
9105 }
9106 tx_relay->m_last_mempool_req =
9107 std::chrono::duration_cast<std::chrono::seconds>(
9108 current_time);
9109 }
9110
9111 // Determine transactions to relay
9112 if (fSendTrickle) {
9113 // Produce a vector with all candidates for sending
9114 std::vector<std::set<TxId>::iterator> vInvTx;
9115 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
9116 for (std::set<TxId>::iterator it =
9117 tx_relay->m_tx_inventory_to_send.begin();
9118 it != tx_relay->m_tx_inventory_to_send.end(); it++) {
9119 vInvTx.push_back(it);
9120 }
9121 const CFeeRate filterrate{
9122 tx_relay->m_fee_filter_received.load()};
9123 // Send out the inventory in the order of admission to our
9124 // mempool, which is guaranteed to be a topological sort order.
9125 // A heap is used so that not all items need sorting if only a
9126 // few are being sent.
9127 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
9128 std::make_heap(vInvTx.begin(), vInvTx.end(),
9129 compareInvMempoolOrder);
9130 // No reason to drain out at many times the network's
9131 // capacity, especially since we have many peers and some
9132 // will draw much shorter delays.
9133 unsigned int nRelayedTransactions = 0;
9134 LOCK(tx_relay->m_bloom_filter_mutex);
9135 while (!vInvTx.empty() &&
9136 nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
9137 config.GetMaxBlockSize() /
9138 1000000) {
9139 // Fetch the top element from the heap
9140 std::pop_heap(vInvTx.begin(), vInvTx.end(),
9141 compareInvMempoolOrder);
9142 std::set<TxId>::iterator it = vInvTx.back();
9143 vInvTx.pop_back();
9144 const TxId txid = *it;
9145 // Remove it from the to-be-sent set
9146 tx_relay->m_tx_inventory_to_send.erase(it);
9147 // Check if not in the filter already
9148 if (tx_relay->m_tx_inventory_known_filter.contains(txid) &&
9149 tx_relay->m_avalanche_stalled_txids.count(txid) == 0) {
9150 continue;
9151 }
9152 // Not in the mempool anymore? don't bother sending it.
9153 auto txinfo = m_mempool.info(txid);
9154 if (!txinfo.tx) {
9155 continue;
9156 }
9157 // Peer told you to not send transactions at that
9158 // feerate? Don't bother sending it.
9159 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
9160 continue;
9161 }
9162 if (tx_relay->m_bloom_filter &&
9163 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
9164 *txinfo.tx)) {
9165 continue;
9166 }
9167 // Send
9168 tx_relay->m_recently_announced_invs.insert(txid);
9169 addInvAndMaybeFlush(MSG_TX, txid);
9170 nRelayedTransactions++;
9171 tx_relay->m_tx_inventory_known_filter.insert(txid);
9172 tx_relay->m_avalanche_stalled_txids.erase(txid);
9173 }
9174 }
9175 }
9176 } // release cs_main
9177
9178 if (!vInv.empty()) {
9179 m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
9180 }
9181
9182 {
9183 LOCK(cs_main);
9184
9185 CNodeState &state = *State(pto->GetId());
9186
9187 // Detect whether we're stalling
9188 auto stalling_timeout = m_block_stalling_timeout.load();
9189 if (state.m_stalling_since.count() &&
9190 state.m_stalling_since < current_time - stalling_timeout) {
9191 // Stalling only triggers when the block download window cannot
9192 // move. During normal steady state, the download window should be
9193 // much larger than the to-be-downloaded set of blocks, so
9194 // disconnection should only happen during initial block download.
9195 LogPrintf("Peer=%d is stalling block download, disconnecting\n",
9196 pto->GetId());
9197 pto->fDisconnect = true;
9198 // Increase timeout for the next peer so that we don't disconnect
9199 // multiple peers if our own bandwidth is insufficient.
9200 const auto new_timeout =
9201 std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
9202 if (stalling_timeout != new_timeout &&
9203 m_block_stalling_timeout.compare_exchange_strong(
9204 stalling_timeout, new_timeout)) {
9205 LogPrint(
9206 BCLog::NET,
9207 "Increased stalling timeout temporarily to %d seconds\n",
9208 count_seconds(new_timeout));
9209 }
9210 return true;
9211 }
9212 // In case there is a block that has been in flight from this peer for
9213 // block_interval * (1 + 0.5 * N) (with N the number of peers from which
9214 // we're downloading validated blocks), disconnect due to timeout.
9215 // We compensate for other peers to prevent killing off peers due to our
9216 // own downstream link being saturated. We only count validated
9217 // in-flight blocks so peers can't advertise non-existing block hashes
9218 // to unreasonably increase our timeout.
9219 if (state.vBlocksInFlight.size() > 0) {
9220 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
9221 int nOtherPeersWithValidatedDownloads =
9222 m_peers_downloading_from - 1;
9223 if (current_time >
9224 state.m_downloading_since +
9225 std::chrono::seconds{consensusParams.nPowTargetSpacing} *
9226 (BLOCK_DOWNLOAD_TIMEOUT_BASE +
9227 BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
9228 nOtherPeersWithValidatedDownloads)) {
9229 LogPrintf("Timeout downloading block %s from peer=%d, "
9230 "disconnecting\n",
9231 queuedBlock.pindex->GetBlockHash().ToString(),
9232 pto->GetId());
9233 pto->fDisconnect = true;
9234 return true;
9235 }
9236 }
9237
9238 // Check for headers sync timeouts
9239 if (state.fSyncStarted &&
9240 peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
9241 // Detect whether this is a stalling initial-headers-sync peer
9242 if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
9243 if (current_time > peer->m_headers_sync_timeout &&
9244 nSyncStarted == 1 &&
9245 (m_num_preferred_download_peers -
9246 state.fPreferredDownload >=
9247 1)) {
9248 // Disconnect a peer (without NetPermissionFlags::NoBan
9249 // permission) if it is our only sync peer, and we have
9250 // others we could be using instead. Note: If all our peers
9251 // are inbound, then we won't disconnect our sync peer for
9252 // stalling; we have bigger problems if we can't get any
9253 // outbound peers.
9254 if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
9255 LogPrintf("Timeout downloading headers from peer=%d, "
9256 "disconnecting\n",
9257 pto->GetId());
9258 pto->fDisconnect = true;
9259 return true;
9260 } else {
9261 LogPrintf("Timeout downloading headers from noban "
9262 "peer=%d, not disconnecting\n",
9263 pto->GetId());
9264 // Reset the headers sync state so that we have a chance
9265 // to try downloading from a different peer. Note: this
9266 // will also result in at least one more getheaders
9267 // message to be sent to this peer (eventually).
9268 state.fSyncStarted = false;
9269 nSyncStarted--;
9270 peer->m_headers_sync_timeout = 0us;
9271 }
9272 }
9273 } else {
9274 // After we've caught up once, reset the timeout so we can't
9275 // trigger disconnect later.
9276 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
9277 }
9278 }
9279
9280 // Check that outbound peers have reasonable chains. GetTime() is used
9281 // by this anti-DoS logic so we can test this using mocktime.
9282 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
9283 } // release cs_main
9284
9285 std::vector<CInv> vGetData;
9286
9287 //
9288 // Message: getdata (blocks)
9289 //
9290 {
9291 LOCK(cs_main);
9292
9293 CNodeState &state = *State(pto->GetId());
9294
9295 if (CanServeBlocks(*peer) &&
9296 ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
9297 !m_chainman.IsInitialBlockDownload()) &&
9298 state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
9299 std::vector<const CBlockIndex *> vToDownload;
9300 NodeId staller = -1;
9301 auto get_inflight_budget = [&state]() {
9302 return std::max(
9303 0, MAX_BLOCKS_IN_TRANSIT_PER_PEER -
9304 static_cast<int>(state.vBlocksInFlight.size()));
9305 };
9306
9307 // If a snapshot chainstate is in use, we want to find its next
9308 // blocks before the background chainstate to prioritize getting to
9309 // network tip.
9310 FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload,
9311 staller);
9312 if (m_chainman.BackgroundSyncInProgress() &&
9313 !IsLimitedPeer(*peer)) {
9314 // If the background tip is not an ancestor of the snapshot
9315 // block, we need to start requesting blocks from their last
9316 // common ancestor.
9317 const CBlockIndex *from_tip =
9318 LastCommonAncestor(m_chainman.GetBackgroundSyncTip(),
9319 m_chainman.GetSnapshotBaseBlock());
9320
9321 TryDownloadingHistoricalBlocks(
9322 *peer, get_inflight_budget(), vToDownload, from_tip,
9323 Assert(m_chainman.GetSnapshotBaseBlock()));
9324 }
9325 for (const CBlockIndex *pindex : vToDownload) {
9326 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
9327 BlockRequested(config, pto->GetId(), *pindex);
9328 LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
9329 pindex->GetBlockHash().ToString(), pindex->nHeight,
9330 pto->GetId());
9331 }
9332 if (state.vBlocksInFlight.empty() && staller != -1) {
9333 if (State(staller)->m_stalling_since == 0us) {
9334 State(staller)->m_stalling_since = current_time;
9335 LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
9336 }
9337 }
9338 }
9339 } // release cs_main
9340
9341 auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
9342 CInv inv(type, hash);
9343 LogPrint(BCLog::NET, "Requesting %s from peer=%d\n", inv.ToString(),
9344 pto->GetId());
9345 vGetData.push_back(std::move(inv));
9346 if (vGetData.size() >= MAX_GETDATA_SZ) {
9347 m_connman.PushMessage(
9348 pto, msgMaker.Make(NetMsgType::GETDATA, std::move(vGetData)));
9349 vGetData.clear();
9350 }
9351 };
9352
9353 //
9354 // Message: getdata (proof)
9355 //
9356 if (m_avalanche) {
9357 LOCK(cs_proofrequest);
9358 std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
9359 auto requestable =
9360 m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
9361 for (const auto &entry : expired) {
9362 LogPrint(BCLog::AVALANCHE,
9363 "timeout of inflight proof %s from peer=%d\n",
9364 entry.second.ToString(), entry.first);
9365 }
9366 for (const auto &proofid : requestable) {
9367 if (!AlreadyHaveProof(proofid)) {
9368 addGetDataAndMaybeFlush(MSG_AVA_PROOF, proofid);
9369 m_proofrequest.RequestedData(
9370 pto->GetId(), proofid,
9371 current_time + PROOF_REQUEST_PARAMS.getdata_interval);
9372 } else {
9373 // We have already seen this proof, no need to download.
9374 // This is just a belt-and-suspenders, as this should
9375 // already be called whenever a proof becomes
9376 // AlreadyHaveProof().
9377 m_proofrequest.ForgetInvId(proofid);
9378 }
9379 }
9380 }
9381
9382 //
9383 // Message: getdata (transactions)
9384 //
9385 {
9386 LOCK(cs_main);
9387 std::vector<std::pair<NodeId, TxId>> expired;
9388 auto requestable =
9389 m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
9390 for (const auto &entry : expired) {
9391 LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
9392 entry.second.ToString(), entry.first);
9393 }
9394 for (const TxId &txid : requestable) {
9395 // Exclude m_recent_rejects_package_reconsiderable: we may be
9396 // requesting a missing parent that was previously rejected for
9397 // being too low feerate.
9398 if (!AlreadyHaveTx(txid, /*include_reconsiderable=*/false)) {
9399 addGetDataAndMaybeFlush(MSG_TX, txid);
9400 m_txrequest.RequestedData(
9401 pto->GetId(), txid,
9402 current_time + TX_REQUEST_PARAMS.getdata_interval);
9403 } else {
9404 // We have already seen this transaction, no need to download.
9405 // This is just a belt-and-suspenders, as this should already be
9406 // called whenever a transaction becomes AlreadyHaveTx().
9407 m_txrequest.ForgetInvId(txid);
9408 }
9409 }
9410
9411 if (!vGetData.empty()) {
9412 m_connman.PushMessage(pto,
9413 msgMaker.Make(NetMsgType::GETDATA, vGetData));
9414 }
9415
9416 } // release cs_main
9417 MaybeSendFeefilter(*pto, *peer, current_time);
9418 return true;
9419}
9420
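
For context between the two functions: SendMessages is not called ad hoc but is driven by the net layer's message-handler thread, which walks every connected node, pumps inbound messages, then flushes outbound work. A simplified, hedged sketch of that caller (the real loop in net.cpp additionally tracks whether more work is pending and sleeps on a condition variable until WakeMessageHandler() fires):

    // Hedged sketch using the ForEachNode / ProcessMessages / SendMessages
    // interfaces referenced by this file; not the actual net.cpp loop.
    void MessageHandlerLoop(CConnman &connman, PeerManager &peerman,
                            const Config &config,
                            std::atomic<bool> &interrupt) {
        while (!interrupt) {
            connman.ForEachNode([&](CNode *pnode) {
                if (pnode->fDisconnect) {
                    return; // skip peers already scheduled for disconnect
                }
                peerman.ProcessMessages(config, pnode, interrupt); // inbound
                peerman.SendMessages(config, pnode);               // outbound
            });
            // ...then wait until new work arrives or interrupt is set.
        }
    }
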
9421bool PeerManagerImpl::ReceivedAvalancheProof(CNode &node, Peer &peer,
9422 const avalanche::ProofRef &proof) {
9423 assert(proof != nullptr);
9424
9425 const avalanche::ProofId &proofid = proof->getId();
9426
9427 AddKnownProof(peer, proofid);
9428
9429 if (m_chainman.IsInitialBlockDownload()) {
9430 // We cannot reliably verify proofs during IBD, so bail out early and
9431 // keep the inventory as pending so it can be requested when the node
9432 // has synced.
9433 return true;
9434 }
9435
9436 const NodeId nodeid = node.GetId();
9437
9438 const bool isStaker = WITH_LOCK(node.cs_avalanche_pubkey,
9439 return node.m_avalanche_pubkey.has_value());
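
The WITH_LOCK(cs, code) macro used here takes the mutex only for the duration of code and yields the value of its return statement, which is how isStaker is computed in a single expression. A minimal usage sketch with hypothetical members (assuming this project's sync.h primitives):

    // Hypothetical members, mirroring the isStaker computation above: the
    // mutex guards m_value, and the lock is held only for the expression.
    Mutex m_value_mutex;
    std::optional<int> m_value GUARDED_BY(m_value_mutex);

    bool HasValue() {
        return WITH_LOCK(m_value_mutex, return m_value.has_value());
    }
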
9440 auto saveProofIfStaker = [this, isStaker](const CNode &node,
9441 const avalanche::ProofId &proofid,
9442 const NodeId nodeid) -> bool {
9443 if (isStaker) {
9444 return m_avalanche->withPeerManager(
9445 [&](avalanche::PeerManager &pm) {
9446 return pm.saveRemoteProof(proofid, nodeid, true);
9447 });
9448 }
9449
9450 return false;
9451 };
9452
9453 {
9454 LOCK(cs_proofrequest);
9455 m_proofrequest.ReceivedResponse(nodeid, proofid);
9456
9457 if (AlreadyHaveProof(proofid)) {
9458 m_proofrequest.ForgetInvId(proofid);
9459 saveProofIfStaker(node, proofid, nodeid);
9460 return true;
9461 }
9462 }
9463
9464 // registerProof should not be called while cs_proofrequest is held, as
9465 // it locks cs_main and that creates a potential deadlock during shutdown.
9466 
9467 avalanche::ProofRegistrationState state;
9468 if (m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
9469 return pm.registerProof(proof, state);
9470 })) {
9471 WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
9472 RelayProof(proofid);
9473
9474 node.m_last_proof_time = GetTime<std::chrono::seconds>();
9475
9476 LogPrint(BCLog::NET, "New avalanche proof: peer=%d, proofid %s\n",
9477 nodeid, proofid.ToString());
9478 }
9479 
9480 if (state.GetResult() == avalanche::ProofRegistrationResult::INVALID) {
9481 m_avalanche->withPeerManager(
9482 [&](avalanche::PeerManager &pm) { pm.setInvalid(proofid); });
9483 Misbehaving(peer, state.GetRejectReason());
9484 return false;
9485 }
9486 
9487 if (state.GetResult() == avalanche::ProofRegistrationResult::MISSING_UTXO) {
9488 // It is possible that a proof contains a utxo we don't know yet, so
9489 // don't ban for this.
9490 return false;
9491 }
9492
9493 // Unlike other rejection reasons, we can expect lots of peers to send a
9494 // proof that we have dangling. In this case we don't want to print a lot
9495 // of useless debug messages; the proof will be polled as soon as it's considered again.
9496 if (!m_avalanche->reconcileOrFinalize(proof) &&
9497 state.GetResult() != avalanche::ProofRegistrationResult::DANGLING) {
9498 LogPrint(BCLog::AVALANCHE,
9499 "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
9500 state.IsValid() ? "not-worth-polling"
9501 : state.GetRejectReason(),
9502 nodeid, proofid.ToString());
9503 }
9504
9505 saveProofIfStaker(node, proofid, nodeid);
9506 return true;
9507}
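
To recap the control flow of ReceivedAvalancheProof, here is a hedged condensation as a standalone function; the enum and names below are illustrative stand-ins, not the real avalanche registration results, and the real code additionally relays, logs, and saves staker proofs:

    // Illustrative condensation of the outcome handling above.
    enum class Outcome { Registered, Invalid, MissingUtxo, NotPolled };

    bool HandleProofOutcome(Outcome outcome) {
        switch (outcome) {
            case Outcome::Registered:  return true;  // relay to other peers
            case Outcome::Invalid:     return false; // punish the sender
            case Outcome::MissingUtxo: return false; // unknown utxo: no ban
            case Outcome::NotPolled:   return true;  // e.g. dangling proof
        }
        return true; // unreachable with the enum above
    }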