net_processing.cpp
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <net_processing.h>

#include <addrman.h>
#include <avalanche/processor.h>
#include <avalanche/proof.h>
#include <banman.h>
#include <blockencodings.h>
#include <blockfilter.h>
#include <blockvalidity.h>
#include <chain.h>
#include <chainparams.h>
#include <config.h>
#include <consensus/amount.h>
#include <hash.h>
#include <headerssync.h>
#include <invrequest.h>
#include <kernel/chain.h>
#include <merkleblock.h>
#include <netbase.h>
#include <netmessagemaker.h>
#include <node/blockstorage.h>
#include <node/miner.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/settings.h>
#include <primitives/block.h>
#include <random.h>
#include <reverse_iterator.h>
#include <scheduler.h>
#include <streams.h>
#include <timedata.h>
#include <tinyformat.h>
#include <txmempool.h>
#include <txorphanage.h>
#include <util/check.h>
#include <util/strencodings.h>
#include <util/trace.h>
#include <validation.h>

#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index_container.hpp>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <functional>
#include <future>
#include <memory>
#include <numeric>
#include <typeinfo>
static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
static constexpr auto HEADERS_RESPONSE_TIME{2min};
static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
static constexpr auto STALE_CHECK_INTERVAL{10min};
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
static constexpr auto MINIMUM_CONNECT_TIME{30s};
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
static constexpr auto PING_INTERVAL{2min};
static const unsigned int MAX_LOCATOR_SZ = 101;
static const unsigned int MAX_INV_SZ = 50000;
static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
              "Max protocol message length must be greater than largest "
              "possible INV message");

static constexpr auto GETAVAADDR_INTERVAL{2min};

static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT{2min};

static constexpr size_t MAX_AVALANCHE_STALLED_TXIDS_PER_PEER{100};

struct DataRequestParameters {
    const size_t max_peer_request_in_flight;

    const size_t max_peer_announcements;

    const std::chrono::seconds nonpref_peer_delay;

    const std::chrono::seconds overloaded_peer_delay;

    const std::chrono::microseconds getdata_interval;

    const NetPermissionFlags bypass_request_limits_permissions;
};

static constexpr DataRequestParameters TX_REQUEST_PARAMS{
    100,                       // max_peer_request_in_flight
    5000,                      // max_peer_announcements
    std::chrono::seconds(2),   // nonpref_peer_delay
    std::chrono::seconds(2),   // overloaded_peer_delay
    std::chrono::seconds(60),  // getdata_interval
    NetPermissionFlags::Relay, // bypass_request_limits_permissions
};

static constexpr DataRequestParameters PROOF_REQUEST_PARAMS{
    100,                      // max_peer_request_in_flight
    5000,                     // max_peer_announcements
    std::chrono::seconds(2),  // nonpref_peer_delay
    std::chrono::seconds(2),  // overloaded_peer_delay
    std::chrono::seconds(60), // getdata_interval
    NetPermissionFlags::
        BypassProofRequestLimits, // bypass_request_limits_permissions
};

static const unsigned int MAX_GETDATA_SZ = 1000;
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
static const int MAX_CMPCTBLOCK_DEPTH = 5;
static const int MAX_BLOCKTXN_DEPTH = 10;
static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP,
              "MAX_BLOCKTXN_DEPTH too high");
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
    INVENTORY_BROADCAST_PER_SECOND *
    count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND *
                                                UNCONDITIONAL_RELAY_DELAY /
                                                std::chrono::seconds{1},
              "INVENTORY_RELAY_MAX too low");

static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
static constexpr uint64_t CMPCTBLOCKS_VERSION{1};

// Internal stuff
namespace {
struct QueuedBlock {
    const CBlockIndex *pindex;
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};

struct StalledTxId {
    TxId txid;
    std::chrono::seconds timeAdded;

    StalledTxId(TxId txid_, std::chrono::seconds timeAdded_)
        : txid(txid_), timeAdded(timeAdded_){};
};

struct by_txid {};
struct by_time {};

using StalledTxIdSet = boost::multi_index_container<
    StalledTxId,
    boost::multi_index::indexed_by<
        // sort by txid
        boost::multi_index::hashed_unique<
            boost::multi_index::tag<by_txid>,
            boost::multi_index::member<StalledTxId, TxId, &StalledTxId::txid>,
            SaltedTxIdHasher>,
        // sort by timeAdded
        boost::multi_index::ordered_non_unique<
            boost::multi_index::tag<by_time>,
            boost::multi_index::member<StalledTxId, std::chrono::seconds,
                                       &StalledTxId::timeAdded>>>>;

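// Illustrative usage sketch (not part of the original file; txid and cutoff
// stand for values in scope): the hashed index gives O(1) membership tests by
// txid, while the ordered by_time index keeps entries sorted by insertion
// time, so expiring everything older than a cutoff is one range erase:
//
//   StalledTxIdSet stalled;
//   stalled.insert({txid, GetTime<std::chrono::seconds>()});
//   auto &byTime = stalled.get<by_time>();
//   byTime.erase(byTime.begin(), byTime.lower_bound(cutoff));
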
struct Peer {
    const NodeId m_id{0};

    const ServiceFlags m_our_services;

    std::atomic<ServiceFlags> m_their_services{NODE_NONE};

    Mutex m_misbehavior_mutex;
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    Mutex m_block_inv_mutex;
    std::vector<BlockHash> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    std::vector<BlockHash>
        m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);

    BlockHash m_continuation_block GUARDED_BY(m_block_inv_mutex){};

    std::atomic<int> m_starting_height{-1};

    std::atomic<uint64_t> m_ping_nonce_sent{0};
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    std::atomic<bool> m_ping_queued{false};

    Amount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
        Amount::zero()};
    std::chrono::microseconds m_next_send_feefilter
        GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    struct TxRelay {
        mutable RecursiveMutex m_bloom_filter_mutex;
        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
        std::unique_ptr<CBloomFilter>
            m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex)
                GUARDED_BY(m_bloom_filter_mutex){nullptr};

        CRollingBloomFilter m_recently_announced_invs GUARDED_BY(
            NetEventsInterface::g_msgproc_mutex){INVENTORY_MAX_RECENT_RELAY,
                                                 0.000001};

        mutable RecursiveMutex m_tx_inventory_mutex;
        CRollingBloomFilter m_tx_inventory_known_filter
            GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
        std::set<TxId> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
        std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
        std::chrono::microseconds
            m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};

        std::atomic<Amount> m_fee_filter_received{Amount::zero()};

        StalledTxIdSet
            m_avalanche_stalled_txids GUARDED_BY(m_tx_inventory_mutex);
    };

    /*
     * Initializes a TxRelay struct for this peer. Can be called at most once
     * for a peer.
     */
    TxRelay *SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
        LOCK(m_tx_relay_mutex);
        Assume(!m_tx_relay);
        m_tx_relay = std::make_unique<Peer::TxRelay>();
        return m_tx_relay.get();
    };

    TxRelay *GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };
    const TxRelay *GetTxRelay() const
        EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };

    struct ProofRelay {
        mutable RecursiveMutex m_proof_inventory_mutex;
        std::set<avalanche::ProofId>
            m_proof_inventory_to_send GUARDED_BY(m_proof_inventory_mutex);
        // Prevent sending proof invs if the peer already knows about them
        CRollingBloomFilter m_proof_inventory_known_filter
            GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
        CRollingBloomFilter m_recently_announced_proofs GUARDED_BY(
            NetEventsInterface::g_msgproc_mutex){INVENTORY_MAX_RECENT_RELAY,
                                                 0.000001};
        std::chrono::microseconds m_next_inv_send_time{0};

        RadixTree<const avalanche::Proof, avalanche::ProofRadixTreeAdapter>
            sharedProofs;
        std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
        std::atomic<bool> compactproofs_requested{false};
    };

    const std::unique_ptr<ProofRelay> m_proof_relay;

    std::vector<CAddress>
        m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    std::unique_ptr<CRollingBloomFilter>
        m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    std::atomic_bool m_addr_relay_enabled{false};
    bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    mutable Mutex m_addr_send_times_mutex;
    std::chrono::microseconds
        m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    std::chrono::microseconds
        m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    std::atomic_bool m_wants_addrv2{false};
    bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    mutable Mutex m_addr_token_bucket_mutex;
    double m_addr_token_bucket GUARDED_BY(m_addr_token_bucket_mutex){1.0};
    std::chrono::microseconds
        m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
            GetTime<std::chrono::microseconds>()};
    std::atomic<uint64_t> m_addr_rate_limited{0};
    std::atomic<uint64_t> m_addr_processed{0};

    bool m_inv_triggered_getheaders_before_sync
        GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    Mutex m_getdata_requests_mutex;
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    NodeClock::time_point m_last_getheaders_timestamp
        GUARDED_BY(NetEventsInterface::g_msgproc_mutex){};

    Mutex m_headers_sync_mutex;
    std::unique_ptr<HeadersSyncState>
        m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex)
            GUARDED_BY(m_headers_sync_mutex){};

    std::atomic<bool> m_sent_sendheaders{false};

    std::chrono::microseconds m_headers_sync_timeout
        GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};

    bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
        false};

    explicit Peer(NodeId id, ServiceFlags our_services, bool fRelayProofs)
        : m_id(id), m_our_services{our_services},
          m_proof_relay(fRelayProofs ? std::make_unique<ProofRelay>()
                                     : nullptr) {}

private:
    mutable Mutex m_tx_relay_mutex;

    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
};

using PeerRef = std::shared_ptr<Peer>;

struct CNodeState {
    const CBlockIndex *pindexBestKnownBlock{nullptr};
    BlockHash hashLastUnknownBlock{};
    const CBlockIndex *pindexLastCommonBlock{nullptr};
    const CBlockIndex *pindexBestHeaderSent{nullptr};
    bool fSyncStarted{false};
    std::chrono::microseconds m_stalling_since{0us};
    std::list<QueuedBlock> vBlocksInFlight;
    std::chrono::microseconds m_downloading_since{0us};
    bool fPreferredDownload{false};
    bool m_requested_hb_cmpctblocks{false};
    bool m_provides_cmpctblocks{false};

    struct ChainSyncTimeoutState {
        std::chrono::seconds m_timeout{0s};
        const CBlockIndex *m_work_header{nullptr};
        bool m_sent_getheaders{false};
        bool m_protect{false};
    };

    ChainSyncTimeoutState m_chain_sync;

    int64_t m_last_block_announcement{0};

    const bool m_is_inbound;

    CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};

class PeerManagerImpl final : public PeerManager {
public:
    PeerManagerImpl(CConnman &connman, AddrMan &addrman, BanMan *banman,
                    ChainstateManager &chainman, CTxMemPool &pool,
                    avalanche::Processor *const avalanche, Options opts);

    void BlockConnected(ChainstateRole role,
                        const std::shared_ptr<const CBlock> &pblock,
                        const CBlockIndex *pindexConnected) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
    void BlockDisconnected(const std::shared_ptr<const CBlock> &block,
                           const CBlockIndex *pindex) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
    void UpdatedBlockTip(const CBlockIndex *pindexNew,
                         const CBlockIndex *pindexFork,
                         bool fInitialDownload) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void BlockChecked(const CBlock &block,
                      const BlockValidationState &state) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void NewPoWValidBlock(const CBlockIndex *pindex,
                          const std::shared_ptr<const CBlock> &pblock) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    void InitializeNode(const Config &config, CNode &node,
                        ServiceFlags our_services) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void FinalizeNode(const Config &config, const CNode &node) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest,
                                 !m_headers_presync_mutex);
    bool ProcessMessages(const Config &config, CNode *pfrom,
                         std::atomic<bool> &interrupt) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                 !m_recent_confirmed_transactions_mutex,
                                 !m_most_recent_block_mutex, !cs_proofrequest,
                                 !m_headers_presync_mutex, g_msgproc_mutex);
    bool SendMessages(const Config &config, CNode *pto) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                 !m_recent_confirmed_transactions_mutex,
                                 !m_most_recent_block_mutex, !cs_proofrequest,
                                 g_msgproc_mutex);

    void StartScheduledTasks(CScheduler &scheduler) override;
    void CheckForStaleTipAndEvictPeers() override;
    std::optional<std::string>
    FetchBlock(const Config &config, NodeId peer_id,
               const CBlockIndex &block_index) override;
    bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    bool IgnoresIncomingTxs() override { return m_opts.ignore_incoming_txs; }
    void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void RelayTransaction(const TxId &txid) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void RelayProof(const avalanche::ProofId &proofid) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void SetBestHeight(int height) override { m_best_height = height; };
    void UnitTestMisbehaving(NodeId peer_id) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) {
        Misbehaving(*Assert(GetPeerRef(peer_id)), "");
    }
    void ProcessMessage(const Config &config, CNode &pfrom,
                        const std::string &msg_type, CDataStream &vRecv,
                        const std::chrono::microseconds time_received,
                        const std::atomic<bool> &interruptMsgProc) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                 !m_recent_confirmed_transactions_mutex,
                                 !m_most_recent_block_mutex, !cs_proofrequest,
                                 !m_headers_presync_mutex, g_msgproc_mutex);
    void UpdateLastBlockAnnounceTime(NodeId node,
                                     int64_t time_in_seconds) override;

private:
    void ConsiderEviction(CNode &pto, Peer &peer,
                          std::chrono::seconds time_in_seconds)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);

    void EvictExtraOutboundPeers(std::chrono::seconds now)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void ReattemptInitialBroadcast(CScheduler &scheduler)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    void UpdateAvalancheStatistics() const;

    void AvalanchePeriodicNetworking(CScheduler &scheduler) const;

    PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    void Misbehaving(Peer &peer, const std::string &message);

    void MaybePunishNodeForBlock(NodeId nodeid,
                                 const BlockValidationState &state,
                                 bool via_compact_block,
                                 const std::string &message = "")
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
                              const std::string &message = "")
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);

    void ProcessInvalidTx(NodeId nodeid, const CTransactionRef &tx,
                          const TxValidationState &result,
                          bool maybe_add_extra_compact_tx)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);

    struct PackageToValidate {
        const Package m_txns;
        const std::vector<NodeId> m_senders;
        explicit PackageToValidate(const CTransactionRef &parent,
                                   const CTransactionRef &child,
                                   NodeId parent_sender, NodeId child_sender)
            : m_txns{parent, child}, m_senders{parent_sender, child_sender} {}

        std::string ToString() const {
            Assume(m_txns.size() == 2);
            return strprintf(
                "parent %s (sender=%d) + child %s (sender=%d)",
                m_txns.front()->GetId().ToString(), m_senders.front(),
                m_txns.back()->GetId().ToString(), m_senders.back());
        }
    };

    void ProcessPackageResult(const PackageToValidate &package_to_validate,
                              const PackageMempoolAcceptResult &package_result)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);

    std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef &ptx,
                                                     NodeId nodeid)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);

    void ProcessValidTx(NodeId nodeid, const CTransactionRef &tx)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);

    bool ProcessOrphanTx(const Config &config, Peer &peer)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    void ProcessHeadersMessage(const Config &config, CNode &pfrom, Peer &peer,
                               std::vector<CBlockHeader> &&headers,
                               bool via_compact_block)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex,
                                 g_msgproc_mutex);

    // Various helpers for headers processing, invoked by
    // ProcessHeadersMessage()
    bool CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
                         const Consensus::Params &consensusParams, Peer &peer);
    arith_uint256 GetAntiDoSWorkThreshold();
    void HandleUnconnectingHeaders(CNode &pfrom, Peer &peer,
                                   const std::vector<CBlockHeader> &headers)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    bool
    CheckHeadersAreContinuous(const std::vector<CBlockHeader> &headers) const;
    bool IsContinuationOfLowWorkHeadersSync(Peer &peer, CNode &pfrom,
                                            std::vector<CBlockHeader> &headers)
        EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex,
                                 !m_headers_presync_mutex, g_msgproc_mutex);
    bool TryLowWorkHeadersSync(Peer &peer, CNode &pfrom,
                               const CBlockIndex *chain_start_header,
                               std::vector<CBlockHeader> &headers)
        EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex,
                                 !m_headers_presync_mutex, g_msgproc_mutex);

    bool IsAncestorOfBestHeaderOrTip(const CBlockIndex *header)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool MaybeSendGetHeaders(CNode &pfrom, const CBlockLocator &locator,
                             Peer &peer)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    void HeadersDirectFetchBlocks(const Config &config, CNode &pfrom,
                                  const CBlockIndex &last_header);
    void UpdatePeerStateForReceivedHeaders(CNode &pfrom, Peer &peer,
                                           const CBlockIndex &last_header,
                                           bool received_new_header,
                                           bool may_have_more_headers)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void SendBlockTransactions(CNode &pfrom, Peer &peer, const CBlock &block,
                               const BlockTransactionsRequest &req);

    void AddTxAnnouncement(const CNode &node, const TxId &txid,
                           std::chrono::microseconds current_time)
        EXCLUSIVE_LOCKS_REQUIRED(::cs_main);

    void
    AddProofAnnouncement(const CNode &node, const avalanche::ProofId &proofid,
                         std::chrono::microseconds current_time, bool preferred)
        EXCLUSIVE_LOCKS_REQUIRED(cs_proofrequest);

    void PushNodeVersion(const Config &config, CNode &pnode, const Peer &peer);

    void MaybeSendPing(CNode &node_to, Peer &peer,
                       std::chrono::microseconds now);

    void MaybeSendAddr(CNode &node, Peer &peer,
                       std::chrono::microseconds current_time)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void MaybeSendSendHeaders(CNode &node, Peer &peer)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void MaybeSendFeefilter(CNode &node, Peer &peer,
                            std::chrono::microseconds current_time)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex);

    FeeFilterRounder
        m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex);

    const CChainParams &m_chainparams;
    CConnman &m_connman;
    AddrMan &m_addrman;
    BanMan *const m_banman;
    ChainstateManager &m_chainman;
    CTxMemPool &m_mempool;
    avalanche::Processor *const m_avalanche;

    InvRequestTracker<TxId> m_txrequest GUARDED_BY(::cs_main);

    Mutex cs_proofrequest;
    InvRequestTracker<avalanche::ProofId>
        m_proofrequest GUARDED_BY(cs_proofrequest);

    std::atomic<int> m_best_height{-1};

    std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};

    const Options m_opts;

    bool RejectIncomingTxs(const CNode &peer) const;

    bool m_initial_sync_finished GUARDED_BY(cs_main){false};

    mutable Mutex m_peer_mutex;
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);

    std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);

    const CNodeState *State(NodeId pnode) const
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};

    int nSyncStarted GUARDED_BY(cs_main) = 0;

    BlockHash
        m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};

    std::map<BlockHash, std::pair<NodeId, bool>>
        mapBlockSource GUARDED_BY(cs_main);

    int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    int m_num_preferred_download_peers GUARDED_BY(cs_main){0};

    std::atomic<std::chrono::seconds> m_block_stalling_timeout{
        BLOCK_STALLING_TIMEOUT_DEFAULT};

    bool AlreadyHaveTx(const TxId &txid, bool include_reconsiderable)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main,
                                 !m_recent_confirmed_transactions_mutex);

    CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000,
                                                               0.000'001};

    uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

    CRollingBloomFilter m_recent_rejects_package_reconsiderable
        GUARDED_BY(::cs_main){120'000, 0.000'001};

    mutable Mutex m_recent_confirmed_transactions_mutex;
    CRollingBloomFilter m_recent_confirmed_transactions
        GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};

    std::chrono::microseconds
    NextInvToInbounds(std::chrono::microseconds now,
                      std::chrono::seconds average_interval)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1360
1361 // All of the following cache a recent block, and are protected by
1362 // m_most_recent_block_mutex
1363 mutable Mutex m_most_recent_block_mutex;
1364 std::shared_ptr<const CBlock>
1365 m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
1366 std::shared_ptr<const CBlockHeaderAndShortTxIDs>
1367 m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
1368 BlockHash m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
1369 std::unique_ptr<const std::map<TxId, CTransactionRef>>
1370 m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);
1371
1372 // Data about the low-work headers synchronization, aggregated from all
1373 // peers' HeadersSyncStates.
1375 Mutex m_headers_presync_mutex;
1386 using HeadersPresyncStats =
1387 std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
1389 std::map<NodeId, HeadersPresyncStats>
1390 m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex){};
1392 NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex){-1};
1394 std::atomic_bool m_headers_presync_should_signal{false};
1395
1399 int m_highest_fast_announce GUARDED_BY(::cs_main){0};
1400
1402 bool IsBlockRequested(const BlockHash &hash)
1404
1406 bool IsBlockRequestedFromOutbound(const BlockHash &hash)
1408
1417 void RemoveBlockRequest(const BlockHash &hash,
1418 std::optional<NodeId> from_peer)
1420
1427 bool BlockRequested(const Config &config, NodeId nodeid,
1428 const CBlockIndex &block,
1429 std::list<QueuedBlock>::iterator **pit = nullptr)
1431
1432 bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1433
1438 void FindNextBlocksToDownload(const Peer &peer, unsigned int count,
1439 std::vector<const CBlockIndex *> &vBlocks,
1440 NodeId &nodeStaller)
1442
1444 void TryDownloadingHistoricalBlocks(
1445 const Peer &peer, unsigned int count,
1446 std::vector<const CBlockIndex *> &vBlocks, const CBlockIndex *from_tip,
1447 const CBlockIndex *target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1448
1478 void FindNextBlocks(std::vector<const CBlockIndex *> &vBlocks,
1479 const Peer &peer, CNodeState *state,
1480 const CBlockIndex *pindexWalk, unsigned int count,
1481 int nWindowEnd, const CChain *activeChain = nullptr,
1482 NodeId *nodeStaller = nullptr)
1484
    typedef std::multimap<BlockHash,
                          std::pair<NodeId, std::list<QueuedBlock>::iterator>>
        BlockDownloadMap;
    BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);

    std::atomic<std::chrono::seconds> m_last_tip_update{0s};

    CTransactionRef FindTxForGetData(const Peer &peer, const TxId &txid,
                                     const std::chrono::seconds mempool_req,
                                     const std::chrono::seconds now)
        LOCKS_EXCLUDED(cs_main)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
                                 NetEventsInterface::g_msgproc_mutex);

    void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
                        const std::atomic<bool> &interruptMsgProc)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
                                 peer.m_getdata_requests_mutex,
                                 NetEventsInterface::g_msgproc_mutex)
            LOCKS_EXCLUDED(cs_main);

    void ProcessBlock(const Config &config, CNode &node,
                      const std::shared_ptr<const CBlock> &block,
                      bool force_processing, bool min_pow_checked);

    void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    int m_peers_downloading_from GUARDED_BY(cs_main) = 0;

    void AddToCompactExtraTransactions(const CTransactionRef &tx)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    std::vector<std::pair<TxHash, CTransactionRef>>
        vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
    size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;

    void ProcessBlockAvailability(NodeId nodeid)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool BlockRequestAllowed(const CBlockIndex *pindex)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool AlreadyHaveBlock(const BlockHash &block_hash)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool AlreadyHaveProof(const avalanche::ProofId &proofid);
    void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
                             const CInv &inv)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    bool PrepareBlockFilterRequest(CNode &node, Peer &peer,
                                   BlockFilterType filter_type,
                                   uint32_t start_height,
                                   const BlockHash &stop_hash,
                                   uint32_t max_height_diff,
                                   const CBlockIndex *&stop_index,
                                   BlockFilterIndex *&filter_index);

    void ProcessGetCFilters(CNode &node, Peer &peer, CDataStream &vRecv);
    void ProcessGetCFHeaders(CNode &node, Peer &peer, CDataStream &vRecv);
    void ProcessGetCFCheckPt(CNode &node, Peer &peer, CDataStream &vRecv);

    uint32_t GetAvalancheVoteForBlock(const BlockHash &hash) const
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    uint32_t GetAvalancheVoteForTx(const avalanche::Processor &avalanche,
                                   const TxId &id) const
        EXCLUSIVE_LOCKS_REQUIRED(!m_mempool.cs,
                                 !m_recent_confirmed_transactions_mutex);

    bool SetupAddressRelay(const CNode &node, Peer &peer)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void AddAddressKnown(Peer &peer, const CAddress &addr)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    void PushAddress(Peer &peer, const CAddress &addr)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    bool ReceivedAvalancheProof(CNode &node, Peer &peer,
                                const avalanche::ProofRef &proof)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest);

    avalanche::ProofRef FindProofForGetData(const Peer &peer,
                                            const avalanche::ProofId &proofid,
                                            const std::chrono::seconds now)
        EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);

    bool isPreferredDownloadPeer(const CNode &pfrom);
};

const CNodeState *PeerManagerImpl::State(NodeId pnode) const
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
    if (it == m_node_states.end()) {
        return nullptr;
    }

    return &it->second;
}

CNodeState *PeerManagerImpl::State(NodeId pnode)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    return const_cast<CNodeState *>(std::as_const(*this).State(pnode));
}

static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {
    return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
}

void PeerManagerImpl::AddAddressKnown(Peer &peer, const CAddress &addr) {
    assert(peer.m_addr_known);
    peer.m_addr_known->insert(addr.GetKey());
}

void PeerManagerImpl::PushAddress(Peer &peer, const CAddress &addr) {
    // Known checking here is only to save space from duplicates.
    // Before sending, we'll filter it again for known addresses that were
    // added after addresses were pushed.
    assert(peer.m_addr_known);
    if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
        IsAddrCompatible(peer, addr)) {
        if (peer.m_addrs_to_send.size() >= m_opts.max_addr_to_send) {
            peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] =
                addr;
        } else {
            peer.m_addrs_to_send.push_back(addr);
        }
    }
}
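
// Illustrative note (not part of the original file): once the buffer reaches
// m_opts.max_addr_to_send, a new address always gets in but evicts a
// uniformly random victim, so the queue stays bounded without
// deterministically favouring either the oldest or the newest entries.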

static void AddKnownTx(Peer &peer, const TxId &txid) {
    auto tx_relay = peer.GetTxRelay();
    if (!tx_relay) {
        return;
    }

    LOCK(tx_relay->m_tx_inventory_mutex);
    tx_relay->m_tx_inventory_known_filter.insert(txid);
}

static void AddKnownProof(Peer &peer, const avalanche::ProofId &proofid) {
    if (peer.m_proof_relay != nullptr) {
        LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
        peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
    }
}

bool PeerManagerImpl::isPreferredDownloadPeer(const CNode &pfrom) {
    LOCK(cs_main);
    const CNodeState *state = State(pfrom.GetId());
    return state && state->fPreferredDownload;
}

static bool CanServeBlocks(const Peer &peer) {
    return peer.m_their_services & (NODE_NETWORK | NODE_NETWORK_LIMITED);
}

static bool IsLimitedPeer(const Peer &peer) {
    return (!(peer.m_their_services & NODE_NETWORK) &&
            (peer.m_their_services & NODE_NETWORK_LIMITED));
}

std::chrono::microseconds
PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
                                   std::chrono::seconds average_interval) {
    if (m_next_inv_to_inbounds.load() < now) {
        // If this function were called from multiple threads simultaneously,
        // it would be possible that both update the next send variable and
        // return a different result to their caller. This is not possible in
        // practice as only the net processing thread invokes this function.
        m_next_inv_to_inbounds =
            now + m_rng.rand_exp_duration(average_interval);
    }
    return m_next_inv_to_inbounds;
}
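
// Illustrative sketch (not part of the original file): rand_exp_duration draws
// the gap to the next flush from an exponential distribution with the given
// mean, and because the timer is a single shared atomic, every inbound peer
// observes the same flush instant rather than a per-peer schedule:
//
//   auto next = NextInvToInbounds(GetTime<std::chrono::microseconds>(),
//                                 INBOUND_INVENTORY_BROADCAST_INTERVAL);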

bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
    return mapBlocksInFlight.count(hash);
}

bool PeerManagerImpl::IsBlockRequestedFromOutbound(const BlockHash &hash) {
    for (auto range = mapBlocksInFlight.equal_range(hash);
         range.first != range.second; range.first++) {
        auto [nodeid, block_it] = range.first->second;
        CNodeState &nodestate = *Assert(State(nodeid));
        if (!nodestate.m_is_inbound) {
            return true;
        }
    }

    return false;
}

void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash,
                                         std::optional<NodeId> from_peer) {
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
        // Block was not requested from any peer
        return;
    }

    // We should not have requested too many of this block
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    while (range.first != range.second) {
        auto [node_id, list_it] = range.first->second;

        if (from_peer && *from_peer != node_id) {
            range.first++;
            continue;
        }

        CNodeState &state = *Assert(State(node_id));

        if (state.vBlocksInFlight.begin() == list_it) {
            // First block on the queue was received, update the start download
            // time for the next one
            state.m_downloading_since =
                std::max(state.m_downloading_since,
                         GetTime<std::chrono::microseconds>());
        }
        state.vBlocksInFlight.erase(list_it);

        if (state.vBlocksInFlight.empty()) {
            // Last validated block on the queue for this peer was received.
            m_peers_downloading_from--;
        }
        state.m_stalling_since = 0us;

        range.first = mapBlocksInFlight.erase(range.first);
    }
}

bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
                                     const CBlockIndex &block,
                                     std::list<QueuedBlock>::iterator **pit) {
    const BlockHash &hash{block.GetBlockHash()};

    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    // Short-circuit most stuff in case it is from the same node
    for (auto range = mapBlocksInFlight.equal_range(hash);
         range.first != range.second; range.first++) {
        if (range.first->second.first == nodeid) {
            if (pit) {
                *pit = &range.first->second.second;
            }
            return false;
        }
    }

    // Make sure it's not being fetched already from same peer.
    RemoveBlockRequest(hash, nodeid);

    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
        state->vBlocksInFlight.end(),
        {&block, std::unique_ptr<PartiallyDownloadedBlock>(
                     pit ? new PartiallyDownloadedBlock(config, &m_mempool)
                         : nullptr)});
    if (state->vBlocksInFlight.size() == 1) {
        // We're starting a block download (batch) from this peer.
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    }

    auto itInFlight = mapBlocksInFlight.insert(
        std::make_pair(hash, std::make_pair(nodeid, it)));

    if (pit) {
        *pit = &itInFlight->second.second;
    }

    return true;
}
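
// Illustrative note (not part of the original file): a false return with *pit
// set means the block was already in flight from this very peer; callers can
// then attach data to the existing PartiallyDownloadedBlock slot instead of
// registering a duplicate request.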

void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
    AssertLockHeld(cs_main);

    // When in -blocksonly mode, never request high-bandwidth mode from peers.
    // Our mempool will not contain the transactions necessary to reconstruct
    // the compact block.
    if (m_opts.ignore_incoming_txs) {
        return;
    }

    CNodeState *nodestate = State(nodeid);
    if (!nodestate) {
        LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
        return;
    }
    if (!nodestate->m_provides_cmpctblocks) {
        return;
    }
    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
         it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
        CNodeState *state = State(*it);
        if (state != nullptr && !state->m_is_inbound) {
            ++num_outbound_hb_peers;
        }
    }
    if (nodestate->m_is_inbound) {
        // If we're adding an inbound HB peer, make sure we're not removing
        // our last outbound HB peer in the process.
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
            num_outbound_hb_peers == 1) {
            CNodeState *remove_node =
                State(lNodesAnnouncingHeaderAndIDs.front());
            if (remove_node != nullptr && !remove_node->m_is_inbound) {
                // Put the HB outbound peer in the second slot, so that it
                // doesn't get removed.
                std::swap(lNodesAnnouncingHeaderAndIDs.front(),
                          *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
            }
        }
    }
    m_connman.ForNode(nodeid, [this](CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(
                                  ::cs_main) {
        AssertLockHeld(::cs_main);
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            m_connman.ForNode(
                lNodesAnnouncingHeaderAndIDs.front(), [this](CNode *pnodeStop) {
                    m_connman.PushMessage(
                        pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
                                       .Make(NetMsgType::SENDCMPCT,
                                             /*high_bandwidth=*/false,
                                             /*version=*/CMPCTBLOCKS_VERSION));
                    // save BIP152 bandwidth state: we select peer to be
                    // low-bandwidth
                    pnodeStop->m_bip152_highbandwidth_to = false;
                    return true;
                });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        m_connman.PushMessage(pfrom,
                              CNetMsgMaker(pfrom->GetCommonVersion())
                                  .Make(NetMsgType::SENDCMPCT,
                                        /*high_bandwidth=*/true,
                                        /*version=*/CMPCTBLOCKS_VERSION));
        // save BIP152 bandwidth state: we select peer to be high-bandwidth
        pfrom->m_bip152_highbandwidth_to = true;
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
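
// Illustrative walk-through (not part of the original file): suppose the HB
// list is {A, B, C} with A oldest and the new candidate D is inbound. If A is
// our only outbound HB peer, A and B are first swapped so that B is demoted
// instead; the front entry then receives SENDCMPCT(high_bandwidth=false), is
// popped, and D is appended, keeping at most 3 BIP152 high-bandwidth peers.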

bool PeerManagerImpl::TipMayBeStale() {
    AssertLockHeld(cs_main);
    const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
    if (m_last_tip_update.load() == 0s) {
        m_last_tip_update = GetTime<std::chrono::seconds>();
    }
    return m_last_tip_update.load() <
               GetTime<std::chrono::seconds>() -
                   std::chrono::seconds{consensusParams.nPowTargetSpacing *
                                        3} &&
           mapBlocksInFlight.empty();
}
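
// Illustrative arithmetic (not part of the original file, assuming the usual
// 600s proof-of-work target spacing): the tip is treated as potentially stale
// once no block has arrived for more than 3 * 600s = 30 minutes while no
// block download is in flight.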

bool PeerManagerImpl::CanDirectFetch() {
    return m_chainman.ActiveChain().Tip()->Time() >
           GetAdjustedTime() -
               m_chainparams.GetConsensus().PowTargetSpacing() * 20;
}
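
// Illustrative arithmetic (not part of the original file, same 600s spacing
// assumption): direct fetching of announced blocks is only allowed while the
// active tip is at most 20 * 600s, i.e. about 3 hours 20 minutes, old.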

static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    if (state->pindexBestKnownBlock &&
        pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
        return true;
    }
    if (state->pindexBestHeaderSent &&
        pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
        return true;
    }
    return false;
}

void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (!state->hashLastUnknownBlock.IsNull()) {
        const CBlockIndex *pindex =
            m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
        if (pindex && pindex->nChainWork > 0) {
            if (state->pindexBestKnownBlock == nullptr ||
                pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                state->pindexBestKnownBlock = pindex;
            }
            state->hashLastUnknownBlock.SetNull();
        }
    }
}

void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
                                              const BlockHash &hash) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    ProcessBlockAvailability(nodeid);

    const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
    if (pindex && pindex->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == nullptr ||
            pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
            state->pindexBestKnownBlock = pindex;
        }
    } else {
        // An unknown block was announced; just assume that the latest one is
        // the best one.
        state->hashLastUnknownBlock = hash;
    }
}

// Logic for calculating which blocks to download from a given peer, given
// our current tip.
void PeerManagerImpl::FindNextBlocksToDownload(
    const Peer &peer, unsigned int count,
    std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
    if (count == 0) {
        return;
    }

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(peer.m_id);

    if (state->pindexBestKnownBlock == nullptr ||
        state->pindexBestKnownBlock->nChainWork <
            m_chainman.ActiveChain().Tip()->nChainWork ||
        state->pindexBestKnownBlock->nChainWork <
            m_chainman.MinimumChainWork()) {
        // This peer has nothing interesting.
        return;
    }

    // When we sync with AssumeUtxo and discover the snapshot is not in the
    // peer's best chain, abort: We can't reorg to this chain due to missing
    // undo data until the background sync has finished, so downloading blocks
    // from it would be futile.
    const CBlockIndex *snap_base{m_chainman.GetSnapshotBaseBlock()};
    if (snap_base && state->pindexBestKnownBlock->GetAncestor(
                         snap_base->nHeight) != snap_base) {
        LogPrint(BCLog::NET,
                 "Not downloading blocks from peer=%d, which doesn't have the "
                 "snapshot block in its best chain.\n",
                 peer.m_id);
        return;
    }

    // Bootstrap quickly by guessing a parent of our best tip is the forking
    // point. Guessing wrong in either direction is not a problem. Also reset
    // pindexLastCommonBlock after a snapshot was loaded, so that blocks after
    // the snapshot will be prioritised for download.
    if (state->pindexLastCommonBlock == nullptr ||
        (snap_base &&
         state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
        state->pindexLastCommonBlock =
            m_chainman
                .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
                                        m_chainman.ActiveChain().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an
    // ancestor of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(
        state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
        return;
    }

    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more
    // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
    // common with this peer. The +1 is so we can detect stalling, namely if we
    // would be able to download that next block if the window were 1 larger.
    int nWindowEnd =
        state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;

    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd,
                   &m_chainman.ActiveChain(), &nodeStaller);
}
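
// Illustrative example (not part of the original file): with the last common
// block at height 100000, nWindowEnd is 101024, and FindNextBlocks may look
// one block further (nWindowEnd + 1). If the only block we could fetch is the
// one just past the window, the peer whose in-flight block is pinning the
// window down is reported through nodeStaller.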

void PeerManagerImpl::TryDownloadingHistoricalBlocks(
    const Peer &peer, unsigned int count,
    std::vector<const CBlockIndex *> &vBlocks, const CBlockIndex *from_tip,
    const CBlockIndex *target_block) {
    Assert(from_tip);
    Assert(target_block);

    if (vBlocks.size() >= count) {
        return;
    }

    vBlocks.reserve(count);
    CNodeState *state = Assert(State(peer.m_id));

    if (state->pindexBestKnownBlock == nullptr ||
        state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) !=
            target_block) {
        // This peer can't provide us the complete series of blocks leading up
        // to the assumeutxo snapshot base.
        //
        // Presumably this peer's chain has less work than our ActiveChain()'s
        // tip, or else we will eventually crash when we try to reorg to it. Let
        // other logic deal with whether we disconnect this peer.
        //
        // TODO at some point in the future, we might choose to request what
        // blocks this peer does have from the historical chain, despite it not
        // having a complete history beneath the snapshot base.
        return;
    }

    FindNextBlocks(vBlocks, peer, state, from_tip, count,
                   std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW,
                                 target_block->nHeight));
}

void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex *> &vBlocks,
                                     const Peer &peer, CNodeState *state,
                                     const CBlockIndex *pindexWalk,
                                     unsigned int count, int nWindowEnd,
                                     const CChain *activeChain,
                                     NodeId *nodeStaller) {
    std::vector<const CBlockIndex *> vToFetch;
    int nMaxHeight =
        std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed)
        // successors of pindexWalk (towards pindexBestKnownBlock) into
        // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
        // expensive as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(
            pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding
        // the ones that are not yet downloaded and not in flight to vBlocks. In
        // the meantime, update pindexLastCommonBlock as long as all ancestors
        // are already downloaded, or if it's already part of our chain (and
        // therefore don't need it even if pruned).
        for (const CBlockIndex *pindex : vToFetch) {
            if (!pindex->IsValid(BlockValidity::TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (pindex->nStatus.hasData() ||
                (activeChain && activeChain->Contains(pindex))) {
                if (activeChain && pindex->HaveNumChainTxs()) {
                    state->pindexLastCommonBlock = pindex;
                }
            } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                        // We aren't able to fetch anything, but we would be if
                        // the download window was one larger.
                        if (nodeStaller) {
                            *nodeStaller = waitingfor;
                        }
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor =
                    mapBlocksInFlight.lower_bound(pindex->GetBlockHash())
                        ->second.first;
            }
        }
    }
}

} // namespace

template <class InvId>
static bool
TooManyAnnouncements(const CNode &node,
                     const InvRequestTracker<InvId> &requestTracker,
                     const DataRequestParameters &requestParams) {
    return !node.HasPermission(
               requestParams.bypass_request_limits_permissions) &&
           requestTracker.Count(node.GetId()) >=
               requestParams.max_peer_announcements;
}

template <class InvId>
static std::chrono::microseconds
ComputeRequestTime(const CNode &node,
                   const InvRequestTracker<InvId> &requestTracker,
                   const DataRequestParameters &requestParams,
                   std::chrono::microseconds current_time, bool preferred) {
    auto delay = std::chrono::microseconds{0};

    if (!preferred) {
        delay += requestParams.nonpref_peer_delay;
    }

    if (!node.HasPermission(requestParams.bypass_request_limits_permissions) &&
        requestTracker.CountInFlight(node.GetId()) >=
            requestParams.max_peer_request_in_flight) {
        delay += requestParams.overloaded_peer_delay;
    }

    return current_time + delay;
}
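
// Illustrative worked example (not part of the original file): under
// TX_REQUEST_PARAMS, a non-preferred peer that already has 100 transaction
// requests in flight and no bypass permission is scheduled at
//   reqtime = current_time + 2s (nonpref_peer_delay)
//                          + 2s (overloaded_peer_delay),
// while a preferred, non-overloaded peer is requested from immediately.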

void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
                                      const Peer &peer) {
    uint64_t my_services{peer.m_our_services};
    const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
    uint64_t nonce = pnode.GetLocalNonce();
    const int nNodeStartingHeight{m_best_height};
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;
    uint64_t extraEntropy = pnode.GetLocalExtraEntropy();

    CService addr_you =
        addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible()
            ? addr
            : CService();
    uint64_t your_services{addr.nServices};

    const bool tx_relay{!RejectIncomingTxs(pnode)};
    m_connman.PushMessage(
        // your_services, addr_you: Together the pre-version-31402 serialization
        // of CAddress "addrYou" (without nTime)
        // my_services, CService(): Together the pre-version-31402 serialization
        // of CAddress "addrMe" (without nTime)
        &pnode, CNetMsgMaker(INIT_PROTO_VERSION)
                    .Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services,
                          nTime, your_services, addr_you, my_services,
                          CService(), nonce, userAgent(config),
                          nNodeStartingHeight, tx_relay, extraEntropy));

    if (fLogIPs) {
        LogPrint(BCLog::NET,
                 "send version message: version %d, blocks=%d, them=%s, "
                 "txrelay=%d, peer=%d\n",
                 PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToString(),
                 tx_relay, nodeid);
    } else {
        LogPrint(BCLog::NET,
                 "send version message: version %d, blocks=%d, "
                 "txrelay=%d, peer=%d\n",
                 PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
    }
}

void PeerManagerImpl::AddTxAnnouncement(
    const CNode &node, const TxId &txid,
    std::chrono::microseconds current_time) {
    // For m_txrequest and state
    AssertLockHeld(::cs_main);

    if (TooManyAnnouncements(node, m_txrequest, TX_REQUEST_PARAMS)) {
        return;
    }

    const bool preferred = isPreferredDownloadPeer(node);
    auto reqtime = ComputeRequestTime(node, m_txrequest, TX_REQUEST_PARAMS,
                                      current_time, preferred);

    m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
}

void PeerManagerImpl::AddProofAnnouncement(
    const CNode &node, const avalanche::ProofId &proofid,
    std::chrono::microseconds current_time, bool preferred) {
    // For m_proofrequest
    AssertLockHeld(cs_proofrequest);

    if (TooManyAnnouncements(node, m_proofrequest, PROOF_REQUEST_PARAMS)) {
        return;
    }

    auto reqtime = ComputeRequestTime(
        node, m_proofrequest, PROOF_REQUEST_PARAMS, current_time, preferred);

    m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
}

void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node,
                                                  int64_t time_in_seconds) {
    LOCK(cs_main);
    CNodeState *state = State(node);
    if (state) {
        state->m_last_block_announcement = time_in_seconds;
    }
}

void PeerManagerImpl::InitializeNode(const Config &config, CNode &node,
                                     ServiceFlags our_services) {
    NodeId nodeid = node.GetId();
    {
        LOCK(cs_main);
        m_node_states.emplace_hint(m_node_states.end(),
                                   std::piecewise_construct,
                                   std::forward_as_tuple(nodeid),
                                   std::forward_as_tuple(node.IsInboundConn()));
        assert(m_txrequest.Count(nodeid) == 0);
    }

    if (NetPermissions::HasFlag(node.m_permission_flags,
                                NetPermissionFlags::BloomFilter)) {
        our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
    }

    PeerRef peer = std::make_shared<Peer>(nodeid, our_services, !!m_avalanche);
    {
        LOCK(m_peer_mutex);
        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
    }
    if (!node.IsInboundConn()) {
        PushNodeVersion(config, node, *peer);
    }
}

void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
    std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();

    for (const TxId &txid : unbroadcast_txids) {
        // Sanity check: all unbroadcast txns should exist in the mempool
        if (m_mempool.exists(txid)) {
            RelayTransaction(txid);
        } else {
            m_mempool.RemoveUnbroadcastTx(txid, true);
        }
    }

    if (m_avalanche) {
        // Get and sanitize the list of proofids to broadcast. The RelayProof
        // call is done in a second loop to avoid locking cs_vNodes while
        // cs_peerManager is locked which would cause a potential deadlock due
        // to reversed lock order.
        auto unbroadcasted_proofids =
            m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                auto unbroadcasted_proofids = pm.getUnbroadcastProofs();

                auto it = unbroadcasted_proofids.begin();
                while (it != unbroadcasted_proofids.end()) {
                    // Sanity check: all unbroadcast proofs should be bound to a
                    // peer in the peermanager
                    if (!pm.isBoundToPeer(*it)) {
                        pm.removeUnbroadcastProof(*it);
                        it = unbroadcasted_proofids.erase(it);
                        continue;
                    }

                    ++it;
                }

                return unbroadcasted_proofids;
            });

        // Remaining proofids are the ones to broadcast
        for (const auto &proofid : unbroadcasted_proofids) {
            RelayProof(proofid);
        }
    }

    // Schedule next run for 10-15 minutes in the future.
    // We add randomness on every cycle to avoid the possibility of P2P
    // fingerprinting.
    const auto reattemptBroadcastInterval =
        10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                              reattemptBroadcastInterval);
}
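
// Illustrative arithmetic (not part of the original file):
// randrange<milliseconds>(5min) is uniform over [0, 5min), so consecutive
// rebroadcast attempts are spaced 10 to 15 minutes apart rather than at a
// fixed, fingerprintable period.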
2406
2407void PeerManagerImpl::UpdateAvalancheStatistics() const {
2408 m_connman.ForEachNode([](CNode *pnode) {
2410 });
2411}
2412
2413void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
2414 const auto now = GetTime<std::chrono::seconds>();
2415 std::vector<NodeId> avanode_ids;
2416 bool fQuorumEstablished;
2417 bool fShouldRequestMoreNodes;
2418
2419 if (!m_avalanche) {
2420 // Not enabled or not ready yet, retry later
2421 goto scheduleLater;
2422 }
2423
2424 m_avalanche->sendDelayedAvahello();
2425
2426 fQuorumEstablished = m_avalanche->isQuorumEstablished();
2427 fShouldRequestMoreNodes =
2428 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
2429 return pm.shouldRequestMoreNodes();
2430 });
2431
2432 m_connman.ForEachNode([&](CNode *pnode) {
2433 // Build a list of the avalanche peers nodeids
2434 if (pnode->m_avalanche_enabled) {
2435 avanode_ids.push_back(pnode->GetId());
2436 }
2437
2438 PeerRef peer = GetPeerRef(pnode->GetId());
2439 if (peer == nullptr) {
2440 return;
2441 }
2442 // If a proof radix tree timed out, cleanup
2443 if (peer->m_proof_relay &&
2444 now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
2446 peer->m_proof_relay->sharedProofs = {};
2447 }
2448 });
2449
2450 if (avanode_ids.empty()) {
2451 // No node is available for messaging, retry later
2452 goto scheduleLater;
2453 }
2454
2455 Shuffle(avanode_ids.begin(), avanode_ids.end(), FastRandomContext());
2456
2457 // Request avalanche addresses from our peers
2458 for (NodeId avanodeId : avanode_ids) {
2459 const bool sentGetavaaddr =
2460 m_connman.ForNode(avanodeId, [&](CNode *pavanode) {
2461 if (!fQuorumEstablished || !pavanode->IsInboundConn()) {
2462 m_connman.PushMessage(
2463 pavanode, CNetMsgMaker(pavanode->GetCommonVersion())
2464 .Make(NetMsgType::GETAVAADDR));
2465 PeerRef peer = GetPeerRef(avanodeId);
2466 WITH_LOCK(peer->m_addr_token_bucket_mutex,
2467 peer->m_addr_token_bucket +=
2468 m_opts.max_addr_to_send);
2469 return true;
2470 }
2471 return false;
2472 });
2473
2474 // If we have no reason to believe that we need more nodes, only request
2475 // addresses from one of our peers.
2476 if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
2477 break;
2478 }
2479 }
2480
2481 if (m_chainman.IsInitialBlockDownload()) {
2482 // Don't request proofs while in IBD. We're likely to orphan them
2483 // because we don't have the UTXOs.
2484 goto scheduleLater;
2485 }
2486
2487 // If we never had an avaproofs message yet, be kind and only request to a
2488 // subset of our peers as we expect a ton of avaproofs message in the
2489 // process.
2490 if (m_avalanche->getAvaproofsNodeCounter() == 0) {
2491 avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
2492 }
2493
2494 for (NodeId nodeid : avanode_ids) {
2495 // Send a getavaproofs to all of our peers
2496 m_connman.ForNode(nodeid, [&](CNode *pavanode) {
2497 PeerRef peer = GetPeerRef(nodeid);
2498 if (peer->m_proof_relay) {
2499 m_connman.PushMessage(pavanode,
2500 CNetMsgMaker(pavanode->GetCommonVersion())
2502
2503 peer->m_proof_relay->compactproofs_requested = true;
2504 }
2505 return true;
2506 });
2507 }
2508
2509scheduleLater:
2510 // Schedule next run for 2-5 minutes in the future.
2511 // We add randomness on every cycle to avoid the possibility of P2P
2512 // fingerprinting.
2513 const auto avalanchePeriodicNetworkingInterval =
2514 2min + FastRandomContext().randrange<std::chrono::milliseconds>(3min);
2515 scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2516 avalanchePeriodicNetworkingInterval);
2517}
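// Illustrative sketch (not part of the original file; JitteredInterval is a
// hypothetical helper): both periodic tasks above re-arm themselves with a
// uniformly jittered delay so that observers cannot fingerprint the node by
// a fixed period. Using only the primitives already used above:
//
//     std::chrono::milliseconds
//     JitteredInterval(std::chrono::milliseconds base,
//                      std::chrono::milliseconds spread) {
//         // Uniform in [base, base + spread): e.g. (10min, 5min) gives the
//         // 10-15 minute range used by ReattemptInitialBroadcast.
//         return base +
//                FastRandomContext().randrange<std::chrono::milliseconds>(
//                    spread);
//     }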
2518
2519void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
2520 NodeId nodeid = node.GetId();
2521 {
2522 LOCK(cs_main);
2523 {
2524 // We remove the PeerRef from g_peer_map here, but we don't always
2525 // destruct the Peer. Sometimes another thread is still holding a
2526 // PeerRef, so the refcount is >= 1. Be careful not to do any
2527 // processing here that assumes Peer won't be changed before it's
2528 // destructed.
2529 PeerRef peer = RemovePeer(nodeid);
2530 assert(peer != nullptr);
2531 LOCK(m_peer_mutex);
2532 m_peer_map.erase(nodeid);
2533 }
2534 CNodeState *state = State(nodeid);
2535 assert(state != nullptr);
2536
2537 if (state->fSyncStarted) {
2538 nSyncStarted--;
2539 }
2540
2541 for (const QueuedBlock &entry : state->vBlocksInFlight) {
2542 auto range =
2543 mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
2544 while (range.first != range.second) {
2545 auto [node_id, list_it] = range.first->second;
2546 if (node_id != nodeid) {
2547 range.first++;
2548 } else {
2549 range.first = mapBlocksInFlight.erase(range.first);
2550 }
2551 }
2552 }
2553 m_mempool.withOrphanage([nodeid](TxOrphanage &orphanage) {
2554 orphanage.EraseForPeer(nodeid);
2555 });
2556 m_txrequest.DisconnectedPeer(nodeid);
2557 m_num_preferred_download_peers -= state->fPreferredDownload;
2558 m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
2559 assert(m_peers_downloading_from >= 0);
2560 m_outbound_peers_with_protect_from_disconnect -=
2561 state->m_chain_sync.m_protect;
2562 assert(m_outbound_peers_with_protect_from_disconnect >= 0);
2563
2564 m_node_states.erase(nodeid);
2565
2566 if (m_node_states.empty()) {
2567 // Do a consistency check after the last peer is removed.
2568 assert(mapBlocksInFlight.empty());
2569 assert(m_num_preferred_download_peers == 0);
2570 assert(m_peers_downloading_from == 0);
2571 assert(m_outbound_peers_with_protect_from_disconnect == 0);
2572 assert(m_txrequest.Size() == 0);
2573 assert(m_mempool.withOrphanage([](const TxOrphanage &orphanage) {
2574 return orphanage.Size();
2575 }) == 0);
2576 }
2577 }
2578
2579 if (node.fSuccessfullyConnected && !node.IsBlockOnlyConn() &&
2580 !node.IsInboundConn()) {
2581 // Only change visible addrman state for full outbound peers. We don't
2582 // call Connected() for feeler connections since they don't have
2583 // fSuccessfullyConnected set.
2584 m_addrman.Connected(node.addr);
2585 }
2586 {
2587 LOCK(m_headers_presync_mutex);
2588 m_headers_presync_stats.erase(nodeid);
2589 }
2590
2591 WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
2592
2593 LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
2594}
2595
2596PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
2597 LOCK(m_peer_mutex);
2598 auto it = m_peer_map.find(id);
2599 return it != m_peer_map.end() ? it->second : nullptr;
2600}
2601
2602PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
2603 PeerRef ret;
2604 LOCK(m_peer_mutex);
2605 auto it = m_peer_map.find(id);
2606 if (it != m_peer_map.end()) {
2607 ret = std::move(it->second);
2608 m_peer_map.erase(it);
2609 }
2610 return ret;
2611}
2612
2613bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
2614 CNodeStateStats &stats) const {
2615 {
2616 LOCK(cs_main);
2617 const CNodeState *state = State(nodeid);
2618 if (state == nullptr) {
2619 return false;
2620 }
2621 stats.nSyncHeight = state->pindexBestKnownBlock
2622 ? state->pindexBestKnownBlock->nHeight
2623 : -1;
2624 stats.nCommonHeight = state->pindexLastCommonBlock
2625 ? state->pindexLastCommonBlock->nHeight
2626 : -1;
2627 for (const QueuedBlock &queue : state->vBlocksInFlight) {
2628 if (queue.pindex) {
2629 stats.vHeightInFlight.push_back(queue.pindex->nHeight);
2630 }
2631 }
2632 }
2633
2634 PeerRef peer = GetPeerRef(nodeid);
2635 if (peer == nullptr) {
2636 return false;
2637 }
2638 stats.their_services = peer->m_their_services;
2639 stats.m_starting_height = peer->m_starting_height;
2640 // It is common for nodes with good ping times to suddenly become lagged,
2641 // due to a new block arriving or other large transfer.
2642 // Merely reporting pingtime might fool the caller into thinking the node
2643 // was still responsive, since pingtime does not update until the ping is
2644 // complete, which might take a while. So, if a ping is taking an unusually
2645 // long time in flight, the caller can immediately detect that this is
2646 // happening.
2647 auto ping_wait{0us};
2648 if ((0 != peer->m_ping_nonce_sent) &&
2649 (0 != peer->m_ping_start.load().count())) {
2650 ping_wait =
2651 GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
2652 }
2653
2654 if (auto tx_relay = peer->GetTxRelay()) {
2655 stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex,
2656 return tx_relay->m_relay_txs);
2657 stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
2658 } else {
2659 stats.m_relay_txs = false;
2661 }
2662
2663 stats.m_ping_wait = ping_wait;
2664 stats.m_addr_processed = peer->m_addr_processed.load();
2665 stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
2666 stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
2667 {
2668 LOCK(peer->m_headers_sync_mutex);
2669 if (peer->m_headers_sync) {
2670 stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
2671 }
2672 }
2673
2674 return true;
2675}
2676
2677void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
2678 if (m_opts.max_extra_txs <= 0) {
2679 return;
2680 }
2681
2682 if (!vExtraTxnForCompact.size()) {
2683 vExtraTxnForCompact.resize(m_opts.max_extra_txs);
2684 }
2685
2686 vExtraTxnForCompact[vExtraTxnForCompactIt] =
2687 std::make_pair(tx->GetHash(), tx);
2688 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
2689}
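// Illustrative sketch (not part of the original file): vExtraTxnForCompact
// and vExtraTxnForCompactIt above form a fixed-capacity ring buffer in which
// each insertion overwrites the oldest entry once the buffer is full. A
// minimal standalone analogue (hypothetical ExampleRingBuffer; like the
// max_extra_txs guard above, the capacity must be non-zero):
//
//     template <typename T> class ExampleRingBuffer {
//         std::vector<T> m_buf;
//         size_t m_next{0};
//
//     public:
//         explicit ExampleRingBuffer(size_t capacity) : m_buf(capacity) {}
//         void Push(T value) {
//             // Overwrite the slot written `capacity` insertions ago.
//             m_buf[m_next] = std::move(value);
//             m_next = (m_next + 1) % m_buf.size();
//         }
//     };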
2690
2691void PeerManagerImpl::Misbehaving(Peer &peer, const std::string &message) {
2692 LOCK(peer.m_misbehavior_mutex);
2693
2694 const std::string message_prefixed =
2695 message.empty() ? "" : (": " + message);
2696 peer.m_should_discourage = true;
2697 LogPrint(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id,
2698 message_prefixed);
2699}
2700
2701void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
2702 const BlockValidationState &state,
2703 bool via_compact_block,
2704 const std::string &message) {
2705 PeerRef peer{GetPeerRef(nodeid)};
2706 switch (state.GetResult()) {
2707 case BlockValidationResult::BLOCK_RESULT_UNSET:
2708 break;
2709 case BlockValidationResult::BLOCK_HEADER_LOW_WORK:
2710 // We didn't try to process the block because the header chain may
2711 // have too little work.
2712 break;
2713 // The node is providing invalid data:
2714 case BlockValidationResult::BLOCK_CONSENSUS:
2715 case BlockValidationResult::BLOCK_MUTATED:
2716 if (!via_compact_block) {
2717 if (peer) {
2718 Misbehaving(*peer, message);
2719 }
2720 return;
2721 }
2722 break;
2723 case BlockValidationResult::BLOCK_CACHED_INVALID: {
2724 LOCK(cs_main);
2725 CNodeState *node_state = State(nodeid);
2726 if (node_state == nullptr) {
2727 break;
2728 }
2729
2730 // Ban outbound (but not inbound) peers if on an invalid chain.
2731 // Exempt HB compact block peers. Manual connections are always
2732 // protected from discouragement.
2733 if (!via_compact_block && !node_state->m_is_inbound) {
2734 if (peer) {
2735 Misbehaving(*peer, message);
2736 }
2737 return;
2738 }
2739 break;
2740 }
2741 case BlockValidationResult::BLOCK_INVALID_HEADER:
2742 case BlockValidationResult::BLOCK_CHECKPOINT:
2743 case BlockValidationResult::BLOCK_INVALID_PREV:
2744 if (peer) {
2745 Misbehaving(*peer, message);
2746 }
2747 return;
2748 // Conflicting (but not necessarily invalid) data or different policy:
2749 case BlockValidationResult::BLOCK_MISSING_PREV:
2750 if (peer) {
2751 Misbehaving(*peer, message);
2752 }
2753 return;
2754 case BlockValidationResult::BLOCK_TIME_FUTURE:
2755 break;
2756 }
2757 if (message != "") {
2758 LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
2759 }
2760}
2761
2762void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
2763 const TxValidationState &state,
2764 const std::string &message) {
2765 PeerRef peer{GetPeerRef(nodeid)};
2766 switch (state.GetResult()) {
2767 case TxValidationResult::TX_RESULT_UNSET:
2768 break;
2769 // The node is providing invalid data:
2770 case TxValidationResult::TX_CONSENSUS:
2771 if (peer) {
2772 Misbehaving(*peer, message);
2773 }
2774 return;
2775 // Conflicting (but not necessarily invalid) data or different policy:
2776 case TxValidationResult::TX_INPUTS_NOT_STANDARD:
2777 case TxValidationResult::TX_NOT_STANDARD:
2778 case TxValidationResult::TX_MISSING_INPUTS:
2779 case TxValidationResult::TX_PREMATURE_SPEND:
2780 case TxValidationResult::TX_DUPLICATE:
2781 case TxValidationResult::TX_CONFLICT:
2782 case TxValidationResult::TX_CHILD_BEFORE_PARENT:
2783 case TxValidationResult::TX_MEMPOOL_POLICY:
2784 case TxValidationResult::TX_NO_MEMPOOL:
2785 case TxValidationResult::TX_PACKAGE_RECONSIDERABLE:
2786 case TxValidationResult::TX_AVALANCHE_RECONSIDERABLE:
2787 case TxValidationResult::TX_UNKNOWN:
2788 break;
2789 }
2790 if (message != "") {
2791 LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
2792 }
2793}
2794
2795 bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex *pindex) {
2796 AssertLockHeld(cs_main);
2797 if (m_chainman.ActiveChain().Contains(pindex)) {
2798 return true;
2799 }
2800 return pindex->IsValid(BlockValidity::SCRIPTS) &&
2801 (m_chainman.m_best_header != nullptr) &&
2802 (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() <
2803 STALE_RELAY_AGE_LIMIT) &&
2804 (GetBlockProofEquivalentTime(
2805 *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
2806 m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
2807}
2808
2809std::optional<std::string>
2810PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
2811 const CBlockIndex &block_index) {
2812 if (m_chainman.m_blockman.LoadingBlocks()) {
2813 return "Loading blocks ...";
2814 }
2815
2816 LOCK(cs_main);
2817
2818 // Ensure this peer exists and hasn't been disconnected
2819 CNodeState *state = State(peer_id);
2820 if (state == nullptr) {
2821 return "Peer does not exist";
2822 }
2823
2824 // Forget about all prior requests
2825 RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
2826
2827 // Mark block as in-flight
2828 if (!BlockRequested(config, peer_id, block_index)) {
2829 return "Already requested from this peer";
2830 }
2831
2832 // Construct message to request the block
2833 const BlockHash &hash{block_index.GetBlockHash()};
2834 const std::vector<CInv> invs{CInv(MSG_BLOCK, hash)};
2835
2836 // Send block request message to the peer
2837 if (!m_connman.ForNode(peer_id, [this, &invs](CNode *node) {
2838 const CNetMsgMaker msgMaker(node->GetCommonVersion());
2839 this->m_connman.PushMessage(
2840 node, msgMaker.Make(NetMsgType::GETDATA, invs));
2841 return true;
2842 })) {
2843 return "Node not fully connected";
2844 }
2845
2846 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", hash.ToString(),
2847 peer_id);
2848 return std::nullopt;
2849}
2850
2851std::unique_ptr<PeerManager>
2852PeerManager::make(CConnman &connman, AddrMan &addrman, BanMan *banman,
2853 ChainstateManager &chainman, CTxMemPool &pool,
2854 avalanche::Processor *const avalanche, Options opts) {
2855 return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
2856 pool, avalanche, opts);
2857}
2858
2859PeerManagerImpl::PeerManagerImpl(CConnman &connman, AddrMan &addrman,
2860 BanMan *banman, ChainstateManager &chainman,
2861 CTxMemPool &pool,
2862 avalanche::Processor *const avalanche,
2863 Options opts)
2864 : m_rng{opts.deterministic_rng},
2865 m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE_PER_KB}, m_rng},
2866 m_chainparams(chainman.GetParams()), m_connman(connman),
2867 m_addrman(addrman), m_banman(banman), m_chainman(chainman),
2868 m_mempool(pool), m_avalanche(avalanche), m_opts{opts} {}
2869
2870void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
2871 // Stale tip checking and peer eviction are on two different timers, but we
2872 // don't want them to get out of sync due to drift in the scheduler, so we
2873 // combine them in one function and schedule at the quicker (peer-eviction)
2874 // timer.
2875 static_assert(
2877 "peer eviction timer should be less than stale tip check timer");
2878 scheduler.scheduleEvery(
2879 [this]() {
2880 this->CheckForStaleTipAndEvictPeers();
2881 return true;
2882 },
2883 std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
2884
2885 // schedule next run for 10-15 minutes in the future
2886 const auto reattemptBroadcastInterval =
2887 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
2888 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2889 reattemptBroadcastInterval);
2890
2891 // Update the avalanche statistics on a schedule
2892 scheduler.scheduleEvery(
2893 [this]() {
2894 UpdateAvalancheStatistics();
2895 return true;
2896 },
2897 AVALANCHE_STATISTICS_REFRESH_PERIOD);
2898 
2899 // schedule next run for 2-5 minutes in the future
2900 const auto avalanchePeriodicNetworkingInterval =
2901 2min + FastRandomContext().randrange<std::chrono::milliseconds>(3min);
2902 scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2903 avalanchePeriodicNetworkingInterval);
2904}
2905
2912void PeerManagerImpl::BlockConnected(
2913 ChainstateRole role, const std::shared_ptr<const CBlock> &pblock,
2914 const CBlockIndex *pindex) {
2915 // Update this for all chainstate roles so that we don't mistakenly see
2916 // peers helping us do background IBD as having a stale tip.
2917 m_last_tip_update = GetTime<std::chrono::seconds>();
2918
2919 // In case the dynamic timeout was doubled once or more, reduce it slowly
2920 // back to its default value
2921 auto stalling_timeout = m_block_stalling_timeout.load();
2922 Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
2923 if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
2924 const auto new_timeout =
2925 std::max(std::chrono::duration_cast<std::chrono::seconds>(
2926 stalling_timeout * 0.85),
2927 BLOCK_STALLING_TIMEOUT_DEFAULT);
2928 if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,
2929 new_timeout)) {
2930 LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n",
2931 count_seconds(new_timeout));
2932 }
2933 }
2934
2935 // The following tasks can be skipped since we don't maintain a mempool for
2936 // the ibd/background chainstate.
2937 if (role == ChainstateRole::BACKGROUND) {
2938 return;
2939 }
2940 m_mempool.withOrphanage([&pblock](TxOrphanage &orphanage) {
2941 orphanage.EraseForBlock(*pblock);
2942 });
2943 m_mempool.withConflicting([&pblock](TxConflicting &conflicting) {
2944 conflicting.EraseForBlock(*pblock);
2945 });
2946
2947 {
2948 LOCK(m_recent_confirmed_transactions_mutex);
2949 for (const CTransactionRef &ptx : pblock->vtx) {
2950 m_recent_confirmed_transactions.insert(ptx->GetId());
2951 }
2952 }
2953 {
2954 LOCK(cs_main);
2955 for (const auto &ptx : pblock->vtx) {
2956 m_txrequest.ForgetInvId(ptx->GetId());
2957 }
2958 }
2959}
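// Worked example of the stalling-timeout decay above: starting from a
// timeout that was doubled twice (4x the default), each connected block
// scales it by 0.85, so 4x -> 3.4x -> 2.89x -> ... until std::max() clamps
// the value back at BLOCK_STALLING_TIMEOUT_DEFAULT.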
2960
2961void PeerManagerImpl::BlockDisconnected(
2962 const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
2963 // To avoid relay problems with transactions that were previously
2964 // confirmed, clear our filter of recently confirmed transactions whenever
2965 // there's a reorg.
2966 // This means that in a 1-block reorg (where 1 block is disconnected and
2967 // then another block reconnected), our filter will drop to having only one
2968 // block's worth of transactions in it, but that should be fine, since
2969 // presumably the most common case of relaying a confirmed transaction
2970 // should be just after a new block containing it is found.
2971 LOCK(m_recent_confirmed_transactions_mutex);
2972 m_recent_confirmed_transactions.reset();
2973}
2974
2979void PeerManagerImpl::NewPoWValidBlock(
2980 const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
2981 std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
2982 std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
2983 const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
2984
2985 LOCK(cs_main);
2986
2987 if (pindex->nHeight <= m_highest_fast_announce) {
2988 return;
2989 }
2990 m_highest_fast_announce = pindex->nHeight;
2991
2992 BlockHash hashBlock(pblock->GetHash());
2993 const std::shared_future<CSerializedNetMsg> lazy_ser{
2994 std::async(std::launch::deferred, [&] {
2995 return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock);
2996 })};
2997
2998 {
2999 auto most_recent_block_txs =
3000 std::make_unique<std::map<TxId, CTransactionRef>>();
3001 for (const auto &tx : pblock->vtx) {
3002 most_recent_block_txs->emplace(tx->GetId(), tx);
3003 }
3004
3005 LOCK(m_most_recent_block_mutex);
3006 m_most_recent_block_hash = hashBlock;
3007 m_most_recent_block = pblock;
3008 m_most_recent_compact_block = pcmpctblock;
3009 m_most_recent_block_txs = std::move(most_recent_block_txs);
3010 }
3011
3012 m_connman.ForEachNode(
3013 [this, pindex, &lazy_ser, &hashBlock](CNode *pnode)
3014 EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
3015 AssertLockHeld(::cs_main);
3016 
3017 if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION ||
3018 pnode->fDisconnect) {
3019 return;
3020 }
3021 ProcessBlockAvailability(pnode->GetId());
3022 CNodeState &state = *State(pnode->GetId());
3023 // If the peer has, or we announced to them the previous block
3024 // already, but we don't think they have this one, go ahead and
3025 // announce it.
3026 if (state.m_requested_hb_cmpctblocks &&
3027 !PeerHasHeader(&state, pindex) &&
3028 PeerHasHeader(&state, pindex->pprev)) {
3030 "%s sending header-and-ids %s to peer=%d\n",
3031 "PeerManager::NewPoWValidBlock",
3032 hashBlock.ToString(), pnode->GetId());
3033
3034 const CSerializedNetMsg &ser_cmpctblock{lazy_ser.get()};
3035 m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
3036 state.pindexBestHeaderSent = pindex;
3037 }
3038 });
3039}
3040
3045void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
3046 const CBlockIndex *pindexFork,
3047 bool fInitialDownload) {
3048 SetBestHeight(pindexNew->nHeight);
3049 SetServiceFlagsIBDCache(!fInitialDownload);
3050
3051 // Don't relay inventory during initial block download.
3052 if (fInitialDownload) {
3053 return;
3054 }
3055
3056 // Find the hashes of all blocks that weren't previously in the best chain.
3057 std::vector<BlockHash> vHashes;
3058 const CBlockIndex *pindexToAnnounce = pindexNew;
3059 while (pindexToAnnounce != pindexFork) {
3060 vHashes.push_back(pindexToAnnounce->GetBlockHash());
3061 pindexToAnnounce = pindexToAnnounce->pprev;
3062 if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
3063 // Limit announcements in case of a huge reorganization. Rely on the
3064 // peer's synchronization mechanism in that case.
3065 break;
3066 }
3067 }
3068
3069 {
3070 LOCK(m_peer_mutex);
3071 for (auto &it : m_peer_map) {
3072 Peer &peer = *it.second;
3073 LOCK(peer.m_block_inv_mutex);
3074 for (const BlockHash &hash : reverse_iterate(vHashes)) {
3075 peer.m_blocks_for_headers_relay.push_back(hash);
3076 }
3077 }
3078 }
3079
3080 m_connman.WakeMessageHandler();
3081}
3082
3087void PeerManagerImpl::BlockChecked(const CBlock &block,
3088 const BlockValidationState &state) {
3089 LOCK(cs_main);
3090
3091 const BlockHash hash = block.GetHash();
3092 std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
3093 mapBlockSource.find(hash);
3094
3095 // If the block failed validation, we know where it came from and we're
3096 // still connected to that peer, maybe punish.
3097 if (state.IsInvalid() && it != mapBlockSource.end() &&
3098 State(it->second.first)) {
3099 MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
3100 /*via_compact_block=*/!it->second.second);
3101 }
3102 // Check that:
3103 // 1. The block is valid
3104 // 2. We're not in initial block download
3105 // 3. This is currently the best block we're aware of. We haven't updated
3106 // the tip yet so we have no way to check this directly here. Instead we
3107 // just check that there are currently no other blocks in flight.
3108 else if (state.IsValid() && !m_chainman.IsInitialBlockDownload() &&
3109 mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
3110 if (it != mapBlockSource.end()) {
3111 MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
3112 }
3113 }
3114
3115 if (it != mapBlockSource.end()) {
3116 mapBlockSource.erase(it);
3117 }
3118}
3119
3121//
3122// Messages
3123//
3124
3125bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid,
3126 bool include_reconsiderable) {
3127 if (m_chainman.ActiveChain().Tip()->GetBlockHash() !=
3128 hashRecentRejectsChainTip) {
3129 // If the chain tip has changed previously rejected transactions
3130 // might be now valid, e.g. due to a nLockTime'd tx becoming
3131 // valid, or a double-spend. Reset the rejects filter and give
3132 // those txs a second chance.
3133 hashRecentRejectsChainTip =
3134 m_chainman.ActiveChain().Tip()->GetBlockHash();
3135 m_recent_rejects.reset();
3136 m_recent_rejects_package_reconsiderable.reset();
3137 }
3138
3139 if (m_mempool.withOrphanage([&txid](const TxOrphanage &orphanage) {
3140 return orphanage.HaveTx(txid);
3141 })) {
3142 return true;
3143 }
3144
3145 if (m_mempool.withConflicting([&txid](const TxConflicting &conflicting) {
3146 return conflicting.HaveTx(txid);
3147 })) {
3148 return true;
3149 }
3150
3151 if (include_reconsiderable &&
3152 m_recent_rejects_package_reconsiderable.contains(txid)) {
3153 return true;
3154 }
3155
3156 {
3157 LOCK(m_recent_confirmed_transactions_mutex);
3158 if (m_recent_confirmed_transactions.contains(txid)) {
3159 return true;
3160 }
3161 }
3162
3163 return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
3164}
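// Note on the checks above: they run roughly cheapest and most local first
// (orphanage, conflicting pool, the optional package-reconsiderable reject
// filter, recently confirmed transactions, then the reject filter and the
// mempool itself). The rolling bloom filters among them can return false
// positives; at worst this makes us treat an unknown transaction as already
// known and skip requesting it, it can never cause a transaction to be
// accepted.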
3165
3166bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {
3167 return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
3168}
3169
3170bool PeerManagerImpl::AlreadyHaveProof(const avalanche::ProofId &proofid) {
3171 if (!Assume(m_avalanche)) {
3172 return false;
3173 }
3174
3175 auto localProof = m_avalanche->getLocalProof();
3176 if (localProof && localProof->getId() == proofid) {
3177 return true;
3178 }
3179
3180 return m_avalanche->withPeerManager([&proofid](avalanche::PeerManager &pm) {
3181 return pm.exists(proofid) || pm.isInvalid(proofid);
3182 });
3183}
3184
3185void PeerManagerImpl::SendPings() {
3186 LOCK(m_peer_mutex);
3187 for (auto &it : m_peer_map) {
3188 it.second->m_ping_queued = true;
3189 }
3190}
3191
3192void PeerManagerImpl::RelayTransaction(const TxId &txid) {
3193 LOCK(m_peer_mutex);
3194 for (auto &it : m_peer_map) {
3195 Peer &peer = *it.second;
3196 auto tx_relay = peer.GetTxRelay();
3197 if (!tx_relay) {
3198 continue;
3199 }
3200 LOCK(tx_relay->m_tx_inventory_mutex);
3201 // Only queue transactions for announcement once the version handshake
3202 // is completed. The time of arrival for these transactions is
3203 // otherwise at risk of leaking to a spy, if the spy is able to
3204 // distinguish transactions received during the handshake from the rest
3205 // in the announcement.
3206 if (tx_relay->m_next_inv_send_time == 0s) {
3207 continue;
3208 }
3209
3210 if (!tx_relay->m_tx_inventory_known_filter.contains(txid) ||
3211 tx_relay->m_avalanche_stalled_txids.count(txid) > 0) {
3212 tx_relay->m_tx_inventory_to_send.insert(txid);
3213 }
3214 }
3215}
3216
3217void PeerManagerImpl::RelayProof(const avalanche::ProofId &proofid) {
3218 LOCK(m_peer_mutex);
3219 for (auto &it : m_peer_map) {
3220 Peer &peer = *it.second;
3221
3222 if (!peer.m_proof_relay) {
3223 continue;
3224 }
3225 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
3226 if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
3227 proofid)) {
3228 peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
3229 }
3230 }
3231}
3232
3233void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
3234 bool fReachable) {
3235 // We choose the same nodes within a given 24h window (if the list of
3236 // connected nodes does not change) and we don't relay to nodes that already
3237 // know an address. So within 24h we will likely relay a given address once.
3238 // This is to prevent a peer from unjustly giving their address better
3239 // propagation by sending it to us repeatedly.
3240
3241 if (!fReachable && !addr.IsRelayable()) {
3242 return;
3243 }
3244
3245 // Relay to a limited number of other nodes
3246 // Use deterministic randomness to send to the same nodes for 24 hours
3247 // at a time so the m_addr_knowns of the chosen nodes prevent repeats
3248 const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
3249 const auto current_time{GetTime<std::chrono::seconds>()};
3250 // Adding address hash makes exact rotation time different per address,
3251 // while preserving periodicity.
3252 const uint64_t time_addr{
3253 (static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) /
3254 count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
3255 
3256 const CSipHasher hasher{
3257 m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
3258 .Write(hash_addr)
3259 .Write(time_addr)};
3260
3261 // Relay reachable addresses to 2 peers. Unreachable addresses are relayed
3262 // randomly to 1 or 2 peers.
3263 unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
3264 std::array<std::pair<uint64_t, Peer *>, 2> best{
3265 {{0, nullptr}, {0, nullptr}}};
3266 assert(nRelayNodes <= best.size());
3267
3268 LOCK(m_peer_mutex);
3269
3270 for (auto &[id, peer] : m_peer_map) {
3271 if (peer->m_addr_relay_enabled && id != originator &&
3272 IsAddrCompatible(*peer, addr)) {
3273 uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
3274 for (unsigned int i = 0; i < nRelayNodes; i++) {
3275 if (hashKey > best[i].first) {
3276 std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
3277 best.begin() + i + 1);
3278 best[i] = std::make_pair(hashKey, peer.get());
3279 break;
3280 }
3281 }
3282 }
3283 };
3284
3285 for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
3286 PushAddress(*best[i].second, addr);
3287 }
3288}
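// Worked example of the rotation arithmetic above, assuming the 24 hour
// (86400s) window mentioned in the comments: with
// count_seconds(current_time) = 1700000000 and hash_addr = 41, time_addr =
// (1700000000 + 41) / 86400 = 19675. The quotient, and therefore the SipHash
// keys and the chosen "best" peers, stays constant until the window rolls
// over, and the +hash_addr term staggers that rollover per address while
// preserving the 24h period.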
3289
3290void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
3291 Peer &peer, const CInv &inv) {
3292 const BlockHash hash(inv.hash);
3293
3294 std::shared_ptr<const CBlock> a_recent_block;
3295 std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
3296 {
3297 LOCK(m_most_recent_block_mutex);
3298 a_recent_block = m_most_recent_block;
3299 a_recent_compact_block = m_most_recent_compact_block;
3300 }
3301
3302 bool need_activate_chain = false;
3303 {
3304 LOCK(cs_main);
3305 const CBlockIndex *pindex =
3306 m_chainman.m_blockman.LookupBlockIndex(hash);
3307 if (pindex) {
3308 if (pindex->HaveNumChainTxs() &&
3309 !pindex->IsValid(BlockValidity::SCRIPTS) &&
3310 pindex->IsValid(BlockValidity::TREE)) {
3311 // If we have the block and all of its parents, but have not yet
3312 // validated it, we might be in the middle of connecting it (ie
3313 // in the unlock of cs_main before ActivateBestChain but after
3314 // AcceptBlock). In this case, we need to run ActivateBestChain
3315 // prior to checking the relay conditions below.
3316 need_activate_chain = true;
3317 }
3318 }
3319 } // release cs_main before calling ActivateBestChain
3320 if (need_activate_chain) {
3322 if (!m_chainman.ActiveChainstate().ActivateBestChain(
3323 state, a_recent_block, m_avalanche)) {
3324 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
3325 state.ToString());
3326 }
3327 }
3328
3329 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3330 const CBlockIndex *pindex{nullptr};
3331 const CBlockIndex *tip{nullptr};
3332 bool can_direct_fetch{false};
3333 FlatFilePos block_pos{};
3334 {
3335 LOCK(cs_main);
3336 pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
3337 if (!pindex) {
3338 return;
3339 }
3340 if (!BlockRequestAllowed(pindex)) {
3342 "%s: ignoring request from peer=%i for old "
3343 "block that isn't in the main chain\n",
3344 __func__, pfrom.GetId());
3345 return;
3346 }
3347 // Disconnect node in case we have reached the outbound limit for
3348 // serving historical blocks.
3349 if (m_connman.OutboundTargetReached(true) &&
3350 (((m_chainman.m_best_header != nullptr) &&
3351 (m_chainman.m_best_header->GetBlockTime() -
3352 pindex->GetBlockTime() >
3353 HISTORICAL_BLOCK_AGE)) ||
3354 inv.IsMsgFilteredBlk()) &&
3355 // nodes with the download permission may exceed target
3356 !pfrom.HasPermission(NetPermissionFlags::Download)) {
3357 LogPrint(
3358 BCLog::NET,
3359 "historical block serving limit reached, disconnect peer=%d\n",
3360 pfrom.GetId());
3361 pfrom.fDisconnect = true;
3362 return;
3363 }
3364 tip = m_chainman.ActiveChain().Tip();
3365 // Avoid leaking prune-height by never sending blocks below the
3366 // NODE_NETWORK_LIMITED threshold.
3367 // Add two blocks buffer extension for possible races
3368 if (!pfrom.HasPermission(NetPermissionFlags::NoBan) &&
3369 ((((peer.m_our_services & NODE_NETWORK_LIMITED) ==
3370 NODE_NETWORK_LIMITED) &&
3371 ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) &&
3372 (tip->nHeight - pindex->nHeight >
3373 (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
3375 "Ignore block request below NODE_NETWORK_LIMITED "
3376 "threshold, disconnect peer=%d\n",
3377 pfrom.GetId());
3378
3379 // disconnect node and prevent it from stalling (would otherwise
3380 // wait for the missing block)
3381 pfrom.fDisconnect = true;
3382 return;
3383 }
3384 // Pruned nodes may have deleted the block, so check whether it's
3385 // available before trying to send.
3386 if (!pindex->nStatus.hasData()) {
3387 return;
3388 }
3389 can_direct_fetch = CanDirectFetch();
3390 block_pos = pindex->GetBlockPos();
3391 }
3392
3393 std::shared_ptr<const CBlock> pblock;
3394 auto handle_block_read_error = [&]() {
3395 if (WITH_LOCK(m_chainman.GetMutex(),
3396 return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
3398 "Block was pruned before it could be read, disconnect "
3399 "peer=%s\n",
3400 pfrom.GetId());
3401 } else {
3402 LogError("Cannot load block from disk, disconnect peer=%d\n",
3403 pfrom.GetId());
3404 }
3405 pfrom.fDisconnect = true;
3406 };
3407
3408 if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
3409 pblock = a_recent_block;
3410 } else if (!inv.IsMsgCmpctBlk()) {
3411 // Fast-path: in this case it is possible to serve the block directly
3412 // from disk, as the network format matches the format on disk
3413 std::vector<uint8_t> block_data;
3414 if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data,
3415 block_pos)) {
3416 handle_block_read_error();
3417 return;
3418 }
3419 m_connman.PushMessage(
3420 &pfrom, msgMaker.Make(NetMsgType::BLOCK, Span{block_data}));
3421 // Don't set pblock as we've sent the block
3422 } else {
3423 // Send block from disk
3424 std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
3425 if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, block_pos)) {
3426 handle_block_read_error();
3427 return;
3428 }
3429 pblock = pblockRead;
3430 }
3431 if (pblock) {
3432 if (inv.IsMsgBlk()) {
3433 m_connman.PushMessage(&pfrom,
3434 msgMaker.Make(NetMsgType::BLOCK, *pblock));
3435 } else if (inv.IsMsgFilteredBlk()) {
3436 bool sendMerkleBlock = false;
3437 CMerkleBlock merkleBlock;
3438 if (auto tx_relay = peer.GetTxRelay()) {
3439 LOCK(tx_relay->m_bloom_filter_mutex);
3440 if (tx_relay->m_bloom_filter) {
3441 sendMerkleBlock = true;
3442 merkleBlock =
3443 CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
3444 }
3445 }
3446 if (sendMerkleBlock) {
3447 m_connman.PushMessage(
3448 &pfrom,
3449 msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
3450 // CMerkleBlock just contains hashes, so also push any
3451 // transactions in the block the client did not see. This avoids
3452 // hurting performance by pointlessly requiring a round-trip.
3453 // Note that there is currently no way for a node to request any
3454 // single transactions we didn't send here - they must either
3455 // disconnect and retry or request the full block. Thus, the
3456 // protocol spec allows us to provide duplicate
3457 // txn here; however, we MUST always provide at least what the
3458 // remote peer needs.
3459 typedef std::pair<size_t, uint256> PairType;
3460 for (PairType &pair : merkleBlock.vMatchedTxn) {
3461 m_connman.PushMessage(
3462 &pfrom, msgMaker.Make(NetMsgType::TX,
3463 *pblock->vtx[pair.first]));
3464 }
3465 }
3466 // else
3467 // no response
3468 } else if (inv.IsMsgCmpctBlk()) {
3469 // If a peer is asking for old blocks, we're almost guaranteed they
3470 // won't have a useful mempool to match against a compact block, and
3471 // we don't feel like constructing the object for them, so instead
3472 // we respond with the full, non-compact block.
3473 int nSendFlags = 0;
3474 if (can_direct_fetch &&
3475 pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) {
3476 if (a_recent_compact_block &&
3477 a_recent_compact_block->header.GetHash() ==
3478 pindex->GetBlockHash()) {
3479 m_connman.PushMessage(
3480 &pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK,
3481 *a_recent_compact_block));
3482 } else {
3483 CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
3484 m_connman.PushMessage(&pfrom,
3485 msgMaker.Make(nSendFlags,
3486 NetMsgType::CMPCTBLOCK,
3487 cmpctblock));
3488 }
3489 } else {
3490 m_connman.PushMessage(
3491 &pfrom,
3492 msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
3493 }
3494 }
3495 }
3496
3497 {
3498 LOCK(peer.m_block_inv_mutex);
3499 // Trigger the peer node to send a getblocks request for the next
3500 // batch of inventory.
3501 if (hash == peer.m_continuation_block) {
3502 // Send immediately. This must send even if redundant, and
3503 // we want it right after the last block so they don't wait for
3504 // other stuff first.
3505 std::vector<CInv> vInv;
3506 vInv.push_back(CInv(MSG_BLOCK, tip->GetBlockHash()));
3507 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
3508 peer.m_continuation_block = BlockHash();
3509 }
3510 }
3511}
3512
3513 CTransactionRef
3514 PeerManagerImpl::FindTxForGetData(const Peer &peer, const TxId &txid,
3515 const std::chrono::seconds mempool_req,
3516 const std::chrono::seconds now) {
3517 auto txinfo = m_mempool.info(txid);
3518 if (txinfo.tx) {
3519 // If a TX could have been INVed in reply to a MEMPOOL request,
3520 // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
3521 // unconditionally.
3522 if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
3523 txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
3524 return std::move(txinfo.tx);
3525 }
3526 }
3527
3528 {
3529 LOCK(cs_main);
3530
3531 // Otherwise, the transaction might have been announced recently.
3532 bool recent =
3533 Assume(peer.GetTxRelay())->m_recently_announced_invs.contains(txid);
3534 if (recent && txinfo.tx) {
3535 return std::move(txinfo.tx);
3536 }
3537
3538 // Or it might be from the most recent block
3539 {
3540 LOCK(m_most_recent_block_mutex);
3541 if (m_most_recent_block_txs != nullptr) {
3542 auto it = m_most_recent_block_txs->find(txid);
3543 if (it != m_most_recent_block_txs->end()) {
3544 return it->second;
3545 }
3546 }
3547 }
3548 }
3549
3550 return {};
3551}
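// Summary of the three ways FindTxForGetData() serves a transaction: (1) it
// is in the mempool and either predates the peer's last MEMPOOL request or
// is older than UNCONDITIONAL_RELAY_DELAY; (2) it is in the mempool and was
// recently announced to this peer; (3) it is in the most recent block.
// Anything else yields a null ref, which the caller reports via NOTFOUND.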
3552
3555 avalanche::ProofRef
3556 PeerManagerImpl::FindProofForGetData(const Peer &peer,
3557 const avalanche::ProofId &proofid,
3558 const std::chrono::seconds now) {
3559 avalanche::ProofRef proof;
3560
3561 bool send_unconditionally =
3562 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
3563 return pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
3564 proof = peer.proof;
3565
3566 // If we know that proof for long enough, allow for requesting
3567 // it.
3568 return peer.registration_time <=
3569 now - UNCONDITIONAL_RELAY_DELAY;
3570 });
3571 });
3572
3573 if (!proof) {
3574 // Always send our local proof if it gets requested, assuming it's
3575 // valid. This will make it easier to bind with peers upon startup where
3576 // the status of our proof is unknown pending for a block. Note that it
3577 // still needs to have been announced first (presumably via an avahello
3578 // message).
3579 proof = m_avalanche->getLocalProof();
3580 }
3581
3582 // We don't have this proof
3583 if (!proof) {
3584 return avalanche::ProofRef();
3585 }
3586
3587 if (send_unconditionally) {
3588 return proof;
3589 }
3590
3591 // Otherwise, the proofs must have been announced recently.
3592 if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {
3593 return proof;
3594 }
3595
3596 return avalanche::ProofRef();
3597}
3598
3599void PeerManagerImpl::ProcessGetData(
3600 const Config &config, CNode &pfrom, Peer &peer,
3601 const std::atomic<bool> &interruptMsgProc) {
3602 AssertLockNotHeld(cs_main);
3603 
3604 auto tx_relay = peer.GetTxRelay();
3605
3606 std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
3607 std::vector<CInv> vNotFound;
3608 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3609
3610 const auto now{GetTime<std::chrono::seconds>()};
3611 // Get last mempool request time
3612 const auto mempool_req = tx_relay != nullptr
3613 ? tx_relay->m_last_mempool_req.load()
3614 : std::chrono::seconds::min();
3615
3616 // Process as many TX or AVA_PROOF items from the front of the getdata
3617 // queue as possible, since they're common and it's efficient to batch
3618 // process them.
3619 while (it != peer.m_getdata_requests.end()) {
3620 if (interruptMsgProc) {
3621 return;
3622 }
3623 // The send buffer provides backpressure. If there's no space in
3624 // the buffer, pause processing until the next call.
3625 if (pfrom.fPauseSend) {
3626 break;
3627 }
3628
3629 const CInv &inv = *it;
3630
3631 if (it->IsMsgProof()) {
3632 if (!m_avalanche) {
3633 vNotFound.push_back(inv);
3634 ++it;
3635 continue;
3636 }
3637 const avalanche::ProofId proofid(inv.hash);
3638 auto proof = FindProofForGetData(peer, proofid, now);
3639 if (proof) {
3640 m_connman.PushMessage(
3641 &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
3642 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
3643 pm.removeUnbroadcastProof(proofid);
3644 });
3645 } else {
3646 vNotFound.push_back(inv);
3647 }
3648
3649 ++it;
3650 continue;
3651 }
3652
3653 if (it->IsMsgTx()) {
3654 if (tx_relay == nullptr) {
3655 // Ignore GETDATA requests for transactions from
3656 // block-relay-only peers and peers that asked us not to
3657 // announce transactions.
3658 continue;
3659 }
3660
3661 const TxId txid(inv.hash);
3662 CTransactionRef tx = FindTxForGetData(peer, txid, mempool_req, now);
3663 if (tx) {
3664 int nSendFlags = 0;
3665 m_connman.PushMessage(
3666 &pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
3667 m_mempool.RemoveUnbroadcastTx(txid);
3668 // As we're going to send tx, make sure its unconfirmed parents
3669 // are made requestable.
3670 std::vector<TxId> parent_ids_to_add;
3671 {
3672 LOCK(m_mempool.cs);
3673 auto txiter = m_mempool.GetIter(tx->GetId());
3674 if (txiter) {
3675 auto &pentry = *txiter;
3676 const CTxMemPoolEntry::Parents &parents =
3677 (*pentry)->GetMemPoolParentsConst();
3678 parent_ids_to_add.reserve(parents.size());
3679 for (const auto &parent : parents) {
3680 if (parent.get()->GetTime() >
3681 now - UNCONDITIONAL_RELAY_DELAY) {
3682 parent_ids_to_add.push_back(
3683 parent.get()->GetTx().GetId());
3684 }
3685 }
3686 }
3687 }
3688 for (const TxId &parent_txid : parent_ids_to_add) {
3689 // Relaying a transaction with a recent but unconfirmed
3690 // parent.
3691 if (WITH_LOCK(tx_relay->m_tx_inventory_mutex,
3692 return !tx_relay->m_tx_inventory_known_filter
3693 .contains(parent_txid))) {
3694 tx_relay->m_recently_announced_invs.insert(parent_txid);
3695 }
3696 }
3697 } else {
3698 vNotFound.push_back(inv);
3699 }
3700
3701 ++it;
3702 continue;
3703 }
3704
3705 // It's neither a proof nor a transaction
3706 break;
3707 }
3708
3709 // Only process one BLOCK item per call, since they're uncommon and can be
3710 // expensive to process.
3711 if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
3712 const CInv &inv = *it++;
3713 if (inv.IsGenBlkMsg()) {
3714 ProcessGetBlockData(config, pfrom, peer, inv);
3715 }
3716 // else: If the first item on the queue is an unknown type, we erase it
3717 // and continue processing the queue on the next call.
3718 }
3719
3720 peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
3721
3722 if (!vNotFound.empty()) {
3723 // Let the peer know that we didn't find what it asked for, so it
3724 // doesn't have to wait around forever. SPV clients care about this
3725 // message: it's needed when they are recursively walking the
3726 // dependencies of relevant unconfirmed transactions. SPV clients want
3727 // to do that because they want to know about (and store and rebroadcast
3728 // and risk analyze) the dependencies of transactions relevant to them,
3729 // without having to download the entire memory pool. Also, other nodes
3730 // can use these messages to automatically request a transaction from
3731 // some other peer that announced it, and stop waiting for us to
3732 // respond. In normal operation, we often send NOTFOUND messages for
3733 // parents of transactions that we relay; if a peer is missing a parent,
3734 // they may assume we have them and request the parents from us.
3735 m_connman.PushMessage(&pfrom,
3736 msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
3737 }
3738}
3739
3740void PeerManagerImpl::SendBlockTransactions(
3741 CNode &pfrom, Peer &peer, const CBlock &block,
3742 const BlockTransactionsRequest &req) {
3743 BlockTransactions resp(req);
3744 for (size_t i = 0; i < req.indices.size(); i++) {
3745 if (req.indices[i] >= block.vtx.size()) {
3746 Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
3747 return;
3748 }
3749 resp.txn[i] = block.vtx[req.indices[i]];
3750 }
3751 LOCK(cs_main);
3752 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3753 int nSendFlags = 0;
3754 m_connman.PushMessage(
3755 &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
3756}
3757
3758bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
3759 const Consensus::Params &consensusParams,
3760 Peer &peer) {
3761 // Do these headers have proof-of-work matching what's claimed?
3762 if (!HasValidProofOfWork(headers, consensusParams)) {
3763 Misbehaving(peer, "header with invalid proof of work");
3764 return false;
3765 }
3766
3767 // Are these headers connected to each other?
3768 if (!CheckHeadersAreContinuous(headers)) {
3769 Misbehaving(peer, "non-continuous headers sequence");
3770 return false;
3771 }
3772 return true;
3773}
3774
3775arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() {
3776 arith_uint256 near_chaintip_work = 0;
3777 LOCK(cs_main);
3778 if (m_chainman.ActiveChain().Tip() != nullptr) {
3779 const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
3780 // Use a 144 block buffer, so that we'll accept headers that fork from
3781 // near our tip.
3782 near_chaintip_work =
3783 tip->nChainWork -
3784 std::min<arith_uint256>(144 * GetBlockProof(*tip), tip->nChainWork);
3785 }
3786 return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
3787}
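// Equivalently, the threshold computed above is
//     max(tip->nChainWork - min(144 * GetBlockProof(*tip), tip->nChainWork),
//         MinimumChainWork())
// where the inner min() clamps the subtrahend so that the unsigned
// arith_uint256 subtraction cannot underflow on a chain holding less than
// 144 blocks' worth of work.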
3788
3795void PeerManagerImpl::HandleUnconnectingHeaders(
3796 CNode &pfrom, Peer &peer, const std::vector<CBlockHeader> &headers) {
3797 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3798
3799 // Try to fill in the missing headers.
3800 const CBlockIndex *best_header{
3801 WITH_LOCK(cs_main, return m_chainman.m_best_header)};
3802 if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
3803 LogPrint(
3804 BCLog::NET,
3805 "received header %s: missing prev block %s, sending getheaders "
3806 "(%d) to end (peer=%d)\n",
3807 headers[0].GetHash().ToString(),
3808 headers[0].hashPrevBlock.ToString(), best_header->nHeight,
3809 pfrom.GetId());
3810 }
3811
3812 // Set hashLastUnknownBlock for this peer, so that if we
3813 // eventually get the headers - even from a different peer -
3814 // we can use this peer to download.
3815 WITH_LOCK(cs_main,
3816 UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
3817}
3818
3819bool PeerManagerImpl::CheckHeadersAreContinuous(
3820 const std::vector<CBlockHeader> &headers) const {
3821 BlockHash hashLastBlock;
3822 for (const CBlockHeader &header : headers) {
3823 if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
3824 return false;
3825 }
3826 hashLastBlock = header.GetHash();
3827 }
3828 return true;
3829}
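// Example of the invariant checked above: for headers h0, h1, h2 it requires
// h1.hashPrevBlock == h0.GetHash() and h2.hashPrevBlock == h1.GetHash(). The
// first header's hashPrevBlock is deliberately unconstrained here; whether
// it connects to our block index is verified separately by the callers.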
3830
3831bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
3832 Peer &peer, CNode &pfrom, std::vector<CBlockHeader> &headers) {
3833 if (peer.m_headers_sync) {
3834 auto result = peer.m_headers_sync->ProcessNextHeaders(
3835 headers, headers.size() == MAX_HEADERS_RESULTS);
3836 // If it is a valid continuation, we should treat the existing
3837 // getheaders request as responded to.
3838 if (result.success) {
3839 peer.m_last_getheaders_timestamp = {};
3840 }
3841 if (result.request_more) {
3842 auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
3843 // If we were instructed to ask for a locator, it should not be
3844 // empty.
3845 Assume(!locator.vHave.empty());
3846 // We can only be instructed to request more if processing was
3847 // successful.
3848 Assume(result.success);
3849 if (!locator.vHave.empty()) {
3850 // It should be impossible for the getheaders request to fail,
3851 // because we just cleared the last getheaders timestamp.
3852 bool sent_getheaders =
3853 MaybeSendGetHeaders(pfrom, locator, peer);
3854 Assume(sent_getheaders);
3855 LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
3856 locator.vHave.front().ToString(), pfrom.GetId());
3857 }
3858 }
3859
3860 if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
3861 peer.m_headers_sync.reset(nullptr);
3862
3863 // Delete this peer's entry in m_headers_presync_stats.
3864 // If this is m_headers_presync_bestpeer, it will be replaced later
3865 // by the next peer that triggers the else{} branch below.
3866 LOCK(m_headers_presync_mutex);
3867 m_headers_presync_stats.erase(pfrom.GetId());
3868 } else {
3869 // Build statistics for this peer's sync.
3870 HeadersPresyncStats stats;
3871 stats.first = peer.m_headers_sync->GetPresyncWork();
3872 if (peer.m_headers_sync->GetState() ==
3873 HeadersSyncState::State::PRESYNC) {
3874 stats.second = {peer.m_headers_sync->GetPresyncHeight(),
3875 peer.m_headers_sync->GetPresyncTime()};
3876 }
3877
3878 // Update statistics in stats.
3879 LOCK(m_headers_presync_mutex);
3880 m_headers_presync_stats[pfrom.GetId()] = stats;
3881 auto best_it =
3882 m_headers_presync_stats.find(m_headers_presync_bestpeer);
3883 bool best_updated = false;
3884 if (best_it == m_headers_presync_stats.end()) {
3885 // If the cached best peer is outdated, iterate over all
3886 // remaining ones (including newly updated one) to find the best
3887 // one.
3888 NodeId peer_best{-1};
3889 const HeadersPresyncStats *stat_best{nullptr};
3890 for (const auto &[_peer, _stat] : m_headers_presync_stats) {
3891 if (!stat_best || _stat > *stat_best) {
3892 peer_best = _peer;
3893 stat_best = &_stat;
3894 }
3895 }
3896 m_headers_presync_bestpeer = peer_best;
3897 best_updated = (peer_best == pfrom.GetId());
3898 } else if (best_it->first == pfrom.GetId() ||
3899 stats > best_it->second) {
3900 // pfrom was and remains the best peer, or pfrom just became
3901 // best.
3902 m_headers_presync_bestpeer = pfrom.GetId();
3903 best_updated = true;
3904 }
3905 if (best_updated && stats.second.has_value()) {
3906 // If the best peer updated, and it is in its first phase,
3907 // signal.
3908 m_headers_presync_should_signal = true;
3909 }
3910 }
3911
3912 if (result.success) {
3913 // We only overwrite the headers passed in if processing was
3914 // successful.
3915 headers.swap(result.pow_validated_headers);
3916 }
3917
3918 return result.success;
3919 }
3920 // Either we didn't have a sync in progress, or something went wrong
3921 // processing these headers, or we are returning headers to the caller to
3922 // process.
3923 return false;
3924}
3925
3926bool PeerManagerImpl::TryLowWorkHeadersSync(
3927 Peer &peer, CNode &pfrom, const CBlockIndex *chain_start_header,
3928 std::vector<CBlockHeader> &headers) {
3929 // Calculate the total work on this chain.
3930 arith_uint256 total_work =
3931 chain_start_header->nChainWork + CalculateHeadersWork(headers);
3932
3933 // Our dynamic anti-DoS threshold (minimum work required on a headers chain
3934 // before we'll store it)
3935 arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
3936
3937 // Avoid DoS via low-difficulty-headers by only processing if the headers
3938 // are part of a chain with sufficient work.
3939 if (total_work < minimum_chain_work) {
3940 // Only try to sync with this peer if their headers message was full;
3941 // otherwise they don't have more headers after this so no point in
3942 // trying to sync their too-little-work chain.
3943 if (headers.size() == MAX_HEADERS_RESULTS) {
3944 // Note: we could advance to the last header in this set that is
3945 // known to us, rather than starting at the first header (which we
3946 // may already have); however this is unlikely to matter much since
3947 // ProcessHeadersMessage() already handles the case where all
3948 // headers in a received message are already known and are
3949 // ancestors of m_best_header or chainActive.Tip(), by skipping
3950 // this logic in that case. So even if the first header in this set
3951 // of headers is known, some header in this set must be new, so
3952 // advancing to the first unknown header would be a small effect.
3953 LOCK(peer.m_headers_sync_mutex);
3954 peer.m_headers_sync.reset(
3955 new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
3956 chain_start_header, minimum_chain_work));
3957
3958 // Now a HeadersSyncState object for tracking this synchronization
3959 // is created, process the headers using it as normal. Failures are
3960 // handled inside of IsContinuationOfLowWorkHeadersSync.
3961 (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
3962 } else {
3964 "Ignoring low-work chain (height=%u) from peer=%d\n",
3965 chain_start_header->nHeight + headers.size(),
3966 pfrom.GetId());
3967 }
3968 // The peer has not yet given us a chain that meets our work threshold,
3969 // so we want to prevent further processing of the headers in any case.
3970 headers = {};
3971 return true;
3972 }
3973
3974 return false;
3975}
3976
3977bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex *header) {
3978 return header != nullptr &&
3979 ((m_chainman.m_best_header != nullptr &&
3980 header ==
3981 m_chainman.m_best_header->GetAncestor(header->nHeight)) ||
3982 m_chainman.ActiveChain().Contains(header));
3983}
3984
3985bool PeerManagerImpl::MaybeSendGetHeaders(CNode &pfrom,
3986 const CBlockLocator &locator,
3987 Peer &peer) {
3988 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3989
3990 const auto current_time = NodeClock::now();
3991
3992 // Only allow a new getheaders message to go out if we don't have a recent
3993 // one already in-flight
3994 if (current_time - peer.m_last_getheaders_timestamp >
3995 HEADERS_RESPONSE_TIME) {
3996 m_connman.PushMessage(
3997 &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
3998 peer.m_last_getheaders_timestamp = current_time;
3999 return true;
4000 }
4001 return false;
4002}
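// The debounce above allows at most one getheaders in flight per peer per
// HEADERS_RESPONSE_TIME window; when a connecting (or empty) headers message
// arrives, ProcessHeadersMessage() clears m_last_getheaders_timestamp so a
// follow-up request can be sent immediately.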
4003
4010void PeerManagerImpl::HeadersDirectFetchBlocks(const Config &config,
4011 CNode &pfrom,
4012 const CBlockIndex &last_header) {
4013 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
4014
4015 LOCK(cs_main);
4016 CNodeState *nodestate = State(pfrom.GetId());
4017
4018 if (CanDirectFetch() && last_header.IsValid(BlockValidity::TREE) &&
4019 m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
4020 std::vector<const CBlockIndex *> vToFetch;
4021 const CBlockIndex *pindexWalk{&last_header};
4022 // Calculate all the blocks we'd need to switch to last_header, up to
4023 // a limit.
4024 while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) &&
4025 vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4026 if (!pindexWalk->nStatus.hasData() &&
4027 !IsBlockRequested(pindexWalk->GetBlockHash())) {
4028 // We don't have this block, and it's not yet in flight.
4029 vToFetch.push_back(pindexWalk);
4030 }
4031 pindexWalk = pindexWalk->pprev;
4032 }
4033 // If pindexWalk still isn't on our main chain, we're looking at a
4034 // very large reorg at a time we think we're close to caught up to
4035 // the main chain -- this shouldn't really happen. Bail out on the
4036 // direct fetch and rely on parallel download instead.
4037 if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
4038 LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
4039 last_header.GetBlockHash().ToString(),
4040 last_header.nHeight);
4041 } else {
4042 std::vector<CInv> vGetData;
4043 // Download as much as possible, from earliest to latest.
4044 for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
4045 if (nodestate->vBlocksInFlight.size() >=
4046 MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4047 // Can't download any more from this peer
4048 break;
4049 }
4050 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
4051 BlockRequested(config, pfrom.GetId(), *pindex);
4052 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
4053 pindex->GetBlockHash().ToString(), pfrom.GetId());
4054 }
4055 if (vGetData.size() > 1) {
4057 "Downloading blocks toward %s (%d) via headers "
4058 "direct fetch\n",
4059 last_header.GetBlockHash().ToString(),
4060 last_header.nHeight);
4061 }
4062 if (vGetData.size() > 0) {
4063 if (!m_opts.ignore_incoming_txs &&
4064 nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
4065 mapBlocksInFlight.size() == 1 &&
4066 last_header.pprev->IsValid(BlockValidity::CHAIN)) {
4067 // In any case, we want to download using a compact
4068 // block, not a regular one.
4069 vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
4070 }
4071 m_connman.PushMessage(
4072 &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4073 }
4074 }
4075 }
4076}
4077
4083void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
4084 CNode &pfrom, Peer &peer, const CBlockIndex &last_header,
4085 bool received_new_header, bool may_have_more_headers) {
4086 LOCK(cs_main);
4087
4088 CNodeState *nodestate = State(pfrom.GetId());
4089
4090 UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());
4091
4092 // From here, pindexBestKnownBlock should be guaranteed to be non-null,
4093 // because it is set in UpdateBlockAvailability. Some nullptr checks are
4094 // still present, however, as belt-and-suspenders.
4095
4096 if (received_new_header &&
4097 last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
4098 nodestate->m_last_block_announcement = GetTime();
4099 }
4100
4101 // If we're in IBD, we want outbound peers that will serve us a useful
4102 // chain. Disconnect peers that are on chains with insufficient work.
4103 if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
4104 // When nCount < MAX_HEADERS_RESULTS, we know we have no more
4105 // headers to fetch from this peer.
4106 if (nodestate->pindexBestKnownBlock &&
4107 nodestate->pindexBestKnownBlock->nChainWork <
4108 m_chainman.MinimumChainWork()) {
4109 // This peer has too little work on their headers chain to help
4110 // us sync -- disconnect if it is an outbound disconnection
4111 // candidate.
4112 // Note: We compare their tip to the minimum chain work (rather than
4113 // m_chainman.ActiveChain().Tip()) because we won't start block
4114 // download until we have a headers chain that has at least
4115 // the minimum chain work, even if a peer has a chain past our tip,
4116 // as an anti-DoS measure.
4117 if (pfrom.IsOutboundOrBlockRelayConn()) {
4118 LogPrintf("Disconnecting outbound peer %d -- headers "
4119 "chain has insufficient work\n",
4120 pfrom.GetId());
4121 pfrom.fDisconnect = true;
4122 }
4123 }
4124 }
4125
4126 // If this is an outbound full-relay peer, check to see if we should
4127 // protect it from the bad/lagging chain logic.
4128 // Note that outbound block-relay peers are excluded from this
4129 // protection, and thus always subject to eviction under the bad/lagging
4130 // chain logic.
4131 // See ChainSyncTimeoutState.
4132 if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() &&
4133 nodestate->pindexBestKnownBlock != nullptr) {
4134 if (m_outbound_peers_with_protect_from_disconnect <
4135 MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT &&
4136 nodestate->pindexBestKnownBlock->nChainWork >=
4137 m_chainman.ActiveChain().Tip()->nChainWork &&
4138 !nodestate->m_chain_sync.m_protect) {
4139 LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n",
4140 pfrom.GetId());
4141 nodestate->m_chain_sync.m_protect = true;
4142 ++m_outbound_peers_with_protect_from_disconnect;
4143 }
4144 }
4145}
4146
4147void PeerManagerImpl::ProcessHeadersMessage(const Config &config, CNode &pfrom,
4148 Peer &peer,
4149 std::vector<CBlockHeader> &&headers,
4150 bool via_compact_block) {
4151 size_t nCount = headers.size();
4152
4153 if (nCount == 0) {
4154 // Nothing interesting. Stop asking this peer for more headers.
4155 // If we were in the middle of headers sync, receiving an empty headers
4156 // message suggests that the peer suddenly has nothing to give us
4157 // (perhaps it reorged to our chain). Clear download state for this
4158 // peer.
4159 LOCK(peer.m_headers_sync_mutex);
4160 if (peer.m_headers_sync) {
4161 peer.m_headers_sync.reset(nullptr);
4162 LOCK(m_headers_presync_mutex);
4163 m_headers_presync_stats.erase(pfrom.GetId());
4164 }
4165 // A headers message with no headers cannot be an announcement, so
4166 // assume it is a response to our last getheaders request, if there is
4167 // one.
4168 peer.m_last_getheaders_timestamp = {};
4169 return;
4170 }
4171
4172 // Before we do any processing, make sure these pass basic sanity checks.
4173 // We'll rely on headers having valid proof-of-work further down, as an
4174 // anti-DoS criteria (note: this check is required before passing any
4175 // headers into HeadersSyncState).
4176 if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
4177 // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
4178 // just return. (Note that even if a header is announced via compact
4179 // block, the header itself should be valid, so this type of error can
4180 // always be punished.)
4181 return;
4182 }
4183
4184 const CBlockIndex *pindexLast = nullptr;
4185
4186 // We'll set already_validated_work to true if these headers are
4187 // successfully processed as part of a low-work headers sync in progress
4188 // (either in PRESYNC or REDOWNLOAD phase).
4189 // If true, this will mean that any headers returned to us (ie during
4190 // REDOWNLOAD) can be validated without further anti-DoS checks.
4191 bool already_validated_work = false;
4192
4193 // If we're in the middle of headers sync, let it do its magic.
4194 bool have_headers_sync = false;
4195 {
4196 LOCK(peer.m_headers_sync_mutex);
4197
4198 already_validated_work =
4199 IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
4200
4201 // The headers we passed in may have been:
4202 // - untouched, perhaps if no headers-sync was in progress, or some
4203 // failure occurred
4204 // - erased, such as if the headers were successfully processed and no
4205 // additional headers processing needs to take place (such as if we
4206 // are still in PRESYNC)
4207 // - replaced with headers that are now ready for validation, such as
4208 // during the REDOWNLOAD phase of a low-work headers sync.
4209 // So just check whether we still have headers that we need to process,
4210 // or not.
4211 if (headers.empty()) {
4212 return;
4213 }
4214
4215 have_headers_sync = !!peer.m_headers_sync;
4216 }
4217
4218 // Do these headers connect to something in our block index?
4219 const CBlockIndex *chain_start_header{
4220 WITH_LOCK(cs_main, return m_chainman.m_blockman.LookupBlockIndex(
4221 headers[0].hashPrevBlock))};
4222 bool headers_connect_blockindex{chain_start_header != nullptr};
4223
4224 if (!headers_connect_blockindex) {
4225 // This could be a BIP 130 block announcement, use
4226 // special logic for handling headers that don't connect, as this
4227 // could be benign.
4228 HandleUnconnectingHeaders(pfrom, peer, headers);
4229 return;
4230 }
4231
4232 // If headers connect, assume that this is in response to any outstanding
4233 // getheaders request we may have sent, and clear out the time of our last
4234 // request. Non-connecting headers cannot be a response to a getheaders
4235 // request.
4236 peer.m_last_getheaders_timestamp = {};
4237
4238 // If the headers we received are already in memory and an ancestor of
4239 // m_best_header or our tip, skip anti-DoS checks. These headers will not
4240 // use any more memory (and we are not leaking information that could be
4241 // used to fingerprint us).
4242 const CBlockIndex *last_received_header{nullptr};
4243 {
4244 LOCK(cs_main);
4245 last_received_header =
4246 m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
4247 if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
4248 already_validated_work = true;
4249 }
4250 }
4251
4252 // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
4253 // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
4254 // on startup).
4255 if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
4256 already_validated_work = true;
4257 }
4258
4259 // At this point, the headers connect to something in our block index.
4260 // Do anti-DoS checks to determine if we should process or store for later
4261 // processing.
4262 if (!already_validated_work &&
4263 TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
4264 // If we successfully started a low-work headers sync, then there
4265 // should be no headers to process any further.
4266 Assume(headers.empty());
4267 return;
4268 }
4269
4270 // At this point, we have a set of headers with sufficient work on them
4271 // which can be processed.
4272
4273 // If we don't have the last header, then this peer will have given us
4274 // something new (if these headers are valid).
4275 bool received_new_header{last_received_header == nullptr};
4276
4277 // Now process all the headers.
4278 BlockValidationState state;
4279 if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true,
4280 state, &pindexLast)) {
4281 if (state.IsInvalid()) {
4282 MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
4283 "invalid header received");
4284 return;
4285 }
4286 }
4287 assert(pindexLast);
4288
4289 // Consider fetching more headers if we are not using our headers-sync
4290 // mechanism.
4291 if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
4292 // Headers message had its maximum size; the peer may have more headers.
4293 if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
4294 LogPrint(
4295 BCLog::NET,
4296 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
4297 pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
4298 }
4299 }
4300
4301 UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast,
4302 received_new_header,
4303 nCount == MAX_HEADERS_RESULTS);
4304
4305 // Consider immediately downloading blocks.
4306 HeadersDirectFetchBlocks(config, pfrom, *pindexLast);
4307}
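// A condensed model (not from this file) of the order of checks applied by
// ProcessHeadersMessage above; the names are simplified stand-ins, not the
// real API.
namespace sketch {
enum class HeadersAction {
    ClearSyncState, // empty message: treat as a getheaders response
    Punish,         // failed the up-front PoW sanity check
    Unconnecting,   // possibly a benign BIP 130 announcement
    LowWorkSync,    // start or continue PRESYNC/REDOWNLOAD
    Validate        // anti-DoS checks passed: process fully
};

HeadersAction Classify(bool empty, bool pow_ok, bool connects,
                       bool already_validated_work, bool low_work) {
    if (empty) return HeadersAction::ClearSyncState;
    if (!pow_ok) return HeadersAction::Punish;
    if (!connects) return HeadersAction::Unconnecting;
    if (!already_validated_work && low_work) return HeadersAction::LowWorkSync;
    return HeadersAction::Validate;
}
} // namespace sketch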
4308
4309void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid,
4310 const CTransactionRef &ptx,
4311 const TxValidationState &state,
4312 bool maybe_add_extra_compact_tx) {
4313 AssertLockNotHeld(m_peer_mutex);
4314 AssertLockHeld(g_msgproc_mutex);
4315 AssertLockHeld(cs_main);
4316
4317 const TxId &txid = ptx->GetId();
4318
4319 LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n",
4320 txid.ToString(), nodeid, state.ToString());
4321
4322 if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
4323 return;
4324 }
4325
4326 if (m_avalanche &&
4327 m_avalanche->isPreconsensusActivated(m_chainman.ActiveTip()) &&
4328 state.GetResult() == TxValidationResult::TX_CONFLICT) {
4329 return;
4330 }
4331
4332 if (state.GetResult() == TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
4333 // If the result is TX_PACKAGE_RECONSIDERABLE, add it to
4334 // m_recent_rejects_package_reconsiderable because we should not
4335 // download or submit this transaction by itself again, but may submit
4336 // it as part of a package later.
4337 m_recent_rejects_package_reconsiderable.insert(txid);
4338 } else {
4339 m_recent_rejects.insert(txid);
4340 }
4341 m_txrequest.ForgetInvId(txid);
4342
4343 if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
4344 AddToCompactExtraTransactions(ptx);
4345 }
4346
4347 MaybePunishNodeForTx(nodeid, state);
4348
4349 // If the tx failed in ProcessOrphanTx, it should be removed from the
4350 // orphanage unless the tx was still missing inputs. If the tx was not in
4351 // the orphanage, EraseTx does nothing and returns 0.
4352 if (m_mempool.withOrphanage([&txid](TxOrphanage &orphanage) {
4353 return orphanage.EraseTx(txid);
4354 }) > 0) {
4355 LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s\n",
4356 txid.ToString());
4357 }
4358}
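// The reject-cache routing above, in isolation (a sketch, not from this
// file): reconsiderable failures are cached separately so the same tx can
// still be retried later as part of a 1-parent-1-child package, while all
// other failures block re-download of the tx outright. std::unordered_set
// stands in for the rolling bloom filters used by the real code.
#include <string>
#include <unordered_set>
namespace sketch {
enum class Failure { MissingInputs, PackageReconsiderable, Other };

void RecordRejection(Failure f, const std::string &txid,
                     std::unordered_set<std::string> &rejects,
                     std::unordered_set<std::string> &rejects_reconsiderable) {
    if (f == Failure::MissingInputs) {
        return; // orphan: goes to neither filter
    }
    if (f == Failure::PackageReconsiderable) {
        rejects_reconsiderable.insert(txid); // may still succeed with a child
    } else {
        rejects.insert(txid); // never download or submit alone again
    }
}
} // namespace sketch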
4359
4360void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef &tx) {
4361 AssertLockNotHeld(m_peer_mutex);
4362 AssertLockHeld(g_msgproc_mutex);
4363 AssertLockHeld(cs_main);
4364
4365 // As this version of the transaction was acceptable, we can forget about
4366 // any requests for it. No-op if the tx is not in txrequest.
4367 m_txrequest.ForgetInvId(tx->GetId());
4368
4369 m_mempool.withOrphanage([&tx](TxOrphanage &orphanage) {
4370 orphanage.AddChildrenToWorkSet(*tx);
4371 // If it came from the orphanage, remove it. No-op if the tx is not in
4372 // txorphanage.
4373 orphanage.EraseTx(tx->GetId());
4374 });
4375
4376 LogPrint(
4377 BCLog::MEMPOOL,
4378 "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
4379 nodeid, tx->GetId().ToString(), m_mempool.size(),
4380 m_mempool.DynamicMemoryUsage() / 1000);
4381
4382 RelayTransaction(tx->GetId());
4383}
4384
4385void PeerManagerImpl::ProcessPackageResult(
4386 const PackageToValidate &package_to_validate,
4387 const PackageMempoolAcceptResult &package_result) {
4388 AssertLockNotHeld(m_peer_mutex);
4389 AssertLockHeld(g_msgproc_mutex);
4390 AssertLockHeld(cs_main);
4391
4392 const auto &package = package_to_validate.m_txns;
4393 const auto &senders = package_to_validate.m_senders;
4394
4395 if (package_result.m_state.IsInvalid()) {
4396 m_recent_rejects_package_reconsiderable.insert(GetPackageHash(package));
4397 }
4398 // We currently only expect to process 1-parent-1-child packages. Remove if
4399 // this changes.
4400 if (!Assume(package.size() == 2)) {
4401 return;
4402 }
4403
4404 // Iterate backwards to erase in-package descendants from the orphanage
4405 // before they become relevant in AddChildrenToWorkSet.
4406 auto package_iter = package.rbegin();
4407 auto senders_iter = senders.rbegin();
4408 while (package_iter != package.rend()) {
4409 const auto &tx = *package_iter;
4410 const NodeId nodeid = *senders_iter;
4411 const auto it_result{package_result.m_tx_results.find(tx->GetId())};
4412
4413 // It is not guaranteed that a result exists for every transaction.
4414 if (it_result != package_result.m_tx_results.end()) {
4415 const auto &tx_result = it_result->second;
4416 switch (tx_result.m_result_type) {
4417 case MempoolAcceptResult::ResultType::VALID: {
4418 ProcessValidTx(nodeid, tx);
4419 break;
4420 }
4421 case MempoolAcceptResult::ResultType::INVALID: {
4422 // Don't add to vExtraTxnForCompact, as these transactions
4423 // should have already been added there when added to the
4424 // orphanage or rejected for TX_PACKAGE_RECONSIDERABLE.
4425 // This should be updated if package submission is ever used
4426 // for transactions that haven't already been validated
4427 // before.
4428 ProcessInvalidTx(nodeid, tx, tx_result.m_state,
4429 /*maybe_add_extra_compact_tx=*/false);
4430 break;
4431 }
4432 case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: {
4433 // AlreadyHaveTx() should be catching transactions that are
4434 // already in mempool.
4435 Assume(false);
4436 break;
4437 }
4438 }
4439 }
4440 package_iter++;
4441 senders_iter++;
4442 }
4443}
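// Why the loop above walks the package in reverse (a sketch, not from this
// file): results are applied child-first, so a child is erased from the
// orphanage before processing its parent would re-schedule it via
// AddChildrenToWorkSet. The paired reverse walk has this shape:
#include <vector>
namespace sketch {
template <typename Tx, typename Sender, typename Fn>
void ForEachChildFirst(const std::vector<Tx> &txns,
                       const std::vector<Sender> &senders, Fn &&fn) {
    // Assumes txns and senders are parallel arrays of equal size.
    auto tx_it = txns.rbegin();
    auto sender_it = senders.rbegin();
    for (; tx_it != txns.rend(); ++tx_it, ++sender_it) {
        fn(*tx_it, *sender_it);
    }
}
} // namespace sketch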
4444
4445std::optional<PeerManagerImpl::PackageToValidate>
4446PeerManagerImpl::Find1P1CPackage(const CTransactionRef &ptx, NodeId nodeid) {
4447 AssertLockNotHeld(m_peer_mutex);
4448 AssertLockHeld(g_msgproc_mutex);
4449 AssertLockHeld(cs_main);
4450
4451 const auto &parent_txid{ptx->GetId()};
4452
4453 Assume(m_recent_rejects_package_reconsiderable.contains(parent_txid));
4454
4455 // Prefer children from this peer. This helps prevent censorship attempts in
4456 // which an attacker sends lots of fake children for the parent, and we
4457 // (unluckily) keep selecting the fake children instead of the real one
4458 // provided by the honest peer.
4459 const auto cpfp_candidates_same_peer{
4460 m_mempool.withOrphanage([&ptx, nodeid](const TxOrphanage &orphanage) {
4461 return orphanage.GetChildrenFromSamePeer(ptx, nodeid);
4462 })};
4463
4464 // These children should be sorted from newest to oldest.
4465 for (const auto &child : cpfp_candidates_same_peer) {
4466 Package maybe_cpfp_package{ptx, child};
4467 if (!m_recent_rejects_package_reconsiderable.contains(
4468 GetPackageHash(maybe_cpfp_package))) {
4469 return PeerManagerImpl::PackageToValidate{ptx, child, nodeid,
4470 nodeid};
4471 }
4472 }
4473
4474 // If no suitable candidate from the same peer is found, also try children
4475 // that were provided by a different peer. This is useful because sometimes
4476 // multiple peers announce both transactions to us, and we happen to
4477 // download them from different peers (we wouldn't have known that these 2
4478 // transactions are related). We still want to find 1p1c packages then.
4479 //
4480 // If we start tracking all announcers of orphans, we can restrict this
4481 // logic to parent + child pairs in which both were provided by the same
4482 // peer, i.e. delete this step.
4483 const auto cpfp_candidates_different_peer{
4484 m_mempool.withOrphanage([&ptx, nodeid](const TxOrphanage &orphanage) {
4485 return orphanage.GetChildrenFromDifferentPeer(ptx, nodeid);
4486 })};
4487
4488 // Find the first 1p1c that hasn't already been rejected. We randomize the
4489 // order to not create a bias that attackers can use to delay package
4490 // acceptance.
4491 //
4492 // Create a random permutation of the indices.
4493 std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
4494 std::iota(tx_indices.begin(), tx_indices.end(), 0);
4495 Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
4496
4497 for (const auto index : tx_indices) {
4498 // If we already tried a package and failed for any reason, the combined
4499 // hash was cached in m_recent_rejects_package_reconsiderable.
4500 const auto [child_tx, child_sender] =
4501 cpfp_candidates_different_peer.at(index);
4502 Package maybe_cpfp_package{ptx, child_tx};
4503 if (!m_recent_rejects_package_reconsiderable.contains(
4504 GetPackageHash(maybe_cpfp_package))) {
4505 return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid,
4506 child_sender};
4507 }
4508 }
4509 return std::nullopt;
4510}
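// The unbiased candidate scan above uses the standard index-permutation
// idiom: fill 0..n-1, shuffle, then visit candidates in that order. A
// self-contained version (a sketch; the real code shuffles with the node's
// internal RNG via Shuffle(), not std::random_device):
#include <algorithm>
#include <cstddef>
#include <numeric>
#include <random>
#include <vector>
namespace sketch {
std::vector<size_t> RandomVisitOrder(size_t n) {
    std::vector<size_t> idx(n);
    std::iota(idx.begin(), idx.end(), 0); // 0, 1, ..., n-1
    std::mt19937_64 rng{std::random_device{}()};
    std::shuffle(idx.begin(), idx.end(), rng);
    return idx;
}
} // namespace sketch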
4511
4512bool PeerManagerImpl::ProcessOrphanTx(const Config &config, Peer &peer) {
4513 AssertLockHeld(g_msgproc_mutex);
4514 LOCK(cs_main);
4515
4516 while (CTransactionRef porphanTx =
4517 m_mempool.withOrphanage([&peer](TxOrphanage &orphanage) {
4518 return orphanage.GetTxToReconsider(peer.m_id);
4519 })) {
4520 const MempoolAcceptResult result =
4521 m_chainman.ProcessTransaction(porphanTx);
4522 const TxValidationState &state = result.m_state;
4523 const TxId &orphanTxId = porphanTx->GetId();
4524
4525 if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
4526 LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s\n",
4527 orphanTxId.ToString());
4528 ProcessValidTx(peer.m_id, porphanTx);
4529 return true;
4530 }
4531
4532 if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
4533 LogPrint(BCLog::TXPACKAGES,
4534 " invalid orphan tx %s from peer=%d. %s\n",
4535 orphanTxId.ToString(), peer.m_id, state.ToString());
4536
4537 if (Assume(state.IsInvalid() &&
4538 state.GetResult() != TxValidationResult::TX_NO_MEMPOOL &&
4539 state.GetResult() !=
4540 TxValidationResult::TX_RESULT_UNSET)) {
4541 ProcessInvalidTx(peer.m_id, porphanTx, state,
4542 /*maybe_add_extra_compact_tx=*/false);
4543 }
4544
4545 return true;
4546 }
4547 }
4548
4549 return false;
4550}
4551
4552bool PeerManagerImpl::PrepareBlockFilterRequest(
4553 CNode &node, Peer &peer, BlockFilterType filter_type, uint32_t start_height,
4554 const BlockHash &stop_hash, uint32_t max_height_diff,
4555 const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
4556 const bool supported_filter_type =
4557 (filter_type == BlockFilterType::BASIC &&
4558 (peer.m_our_services & NODE_COMPACT_FILTERS));
4559 if (!supported_filter_type) {
4560 LogPrint(BCLog::NET,
4561 "peer %d requested unsupported block filter type: %d\n",
4562 node.GetId(), static_cast<uint8_t>(filter_type));
4563 node.fDisconnect = true;
4564 return false;
4565 }
4566
4567 {
4568 LOCK(cs_main);
4569 stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
4570
4571 // Check that the stop block exists and the peer would be allowed to
4572 // fetch it.
4573 if (!stop_index || !BlockRequestAllowed(stop_index)) {
4574 LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
4575 node.GetId(), stop_hash.ToString());
4576 node.fDisconnect = true;
4577 return false;
4578 }
4579 }
4580
4581 uint32_t stop_height = stop_index->nHeight;
4582 if (start_height > stop_height) {
4583 LogPrint(
4584 BCLog::NET,
4585 "peer %d sent invalid getcfilters/getcfheaders with " /* Continued
4586 */
4587 "start height %d and stop height %d\n",
4588 node.GetId(), start_height, stop_height);
4589 node.fDisconnect = true;
4590 return false;
4591 }
4592 if (stop_height - start_height >= max_height_diff) {
4593 LogPrint(BCLog::NET,
4594 "peer %d requested too many cfilters/cfheaders: %d / %d\n",
4595 node.GetId(), stop_height - start_height + 1, max_height_diff);
4596 node.fDisconnect = true;
4597 return false;
4598 }
4599
4600 filter_index = GetBlockFilterIndex(filter_type);
4601 if (!filter_index) {
4602 LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
4603 BlockFilterTypeName(filter_type));
4604 return false;
4605 }
4606
4607 return true;
4608}
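// The height sanity checks above, isolated (a sketch, not from this file):
// a request is well-formed when start <= stop and it spans at most
// max_height_diff filters; since the span is stop - start + 1, the real
// code rejects with a ">=" comparison.
#include <cstdint>
namespace sketch {
bool FilterRangeOk(uint32_t start_height, uint32_t stop_height,
                   uint32_t max_height_diff) {
    if (start_height > stop_height) {
        return false;
    }
    // e.g. start=0, stop=999, max=1000 -> exactly 1000 filters -> accepted
    return stop_height - start_height < max_height_diff;
}
} // namespace sketch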
4609
4610void PeerManagerImpl::ProcessGetCFilters(CNode &node, Peer &peer,
4611 CDataStream &vRecv) {
4612 uint8_t filter_type_ser;
4613 uint32_t start_height;
4614 BlockHash stop_hash;
4615
4616 vRecv >> filter_type_ser >> start_height >> stop_hash;
4617
4618 const BlockFilterType filter_type =
4619 static_cast<BlockFilterType>(filter_type_ser);
4620
4621 const CBlockIndex *stop_index;
4622 BlockFilterIndex *filter_index;
4623 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
4624 stop_hash, MAX_GETCFILTERS_SIZE, stop_index,
4625 filter_index)) {
4626 return;
4627 }
4628
4629 std::vector<BlockFilter> filters;
4630 if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
4631 LogPrint(BCLog::NET,
4632 "Failed to find block filter in index: filter_type=%s, "
4633 "start_height=%d, stop_hash=%s\n",
4634 BlockFilterTypeName(filter_type), start_height,
4635 stop_hash.ToString());
4636 return;
4637 }
4638
4639 for (const auto &filter : filters) {
4640 CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
4641 .Make(NetMsgType::CFILTER, filter);
4642 m_connman.PushMessage(&node, std::move(msg));
4643 }
4644}
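// Field order deserialized above from a BIP 157 getcfilters payload (a
// sketch of the wire layout, not a type used by this file):
#include <array>
#include <cstdint>
namespace sketch {
struct GetCFiltersPayload {
    uint8_t filter_type;               // 0x00 = BASIC
    uint32_t start_height;             // little-endian on the wire
    std::array<uint8_t, 32> stop_hash; // hash of the last block wanted
};
} // namespace sketch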
4645
4646void PeerManagerImpl::ProcessGetCFHeaders(CNode &node, Peer &peer,
4647 CDataStream &vRecv) {
4648 uint8_t filter_type_ser;
4649 uint32_t start_height;
4650 BlockHash stop_hash;
4651
4652 vRecv >> filter_type_ser >> start_height >> stop_hash;
4653
4654 const BlockFilterType filter_type =
4655 static_cast<BlockFilterType>(filter_type_ser);
4656
4657 const CBlockIndex *stop_index;
4658 BlockFilterIndex *filter_index;
4659 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
4660 stop_hash, MAX_GETCFHEADERS_SIZE, stop_index,
4661 filter_index)) {
4662 return;
4663 }
4664
4665 uint256 prev_header;
4666 if (start_height > 0) {
4667 const CBlockIndex *const prev_block =
4668 stop_index->GetAncestor(static_cast<int>(start_height - 1));
4669 if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
4670 LogPrint(BCLog::NET,
4671 "Failed to find block filter header in index: "
4672 "filter_type=%s, block_hash=%s\n",
4673 BlockFilterTypeName(filter_type),
4674 prev_block->GetBlockHash().ToString());
4675 return;
4676 }
4677 }
4678
4679 std::vector<uint256> filter_hashes;
4680 if (!filter_index->LookupFilterHashRange(start_height, stop_index,
4681 filter_hashes)) {
4682 LogPrint(BCLog::NET,
4683 "Failed to find block filter hashes in index: filter_type=%s, "
4684 "start_height=%d, stop_hash=%s\n",
4685 BlockFilterTypeName(filter_type), start_height,
4686 stop_hash.ToString());
4687 return;
4688 }
4689
4690 CSerializedNetMsg msg =
4691 CNetMsgMaker(node.GetCommonVersion())
4692 .Make(NetMsgType::CFHEADERS, filter_type_ser,
4693 stop_index->GetBlockHash(), prev_header, filter_hashes);
4694 m_connman.PushMessage(&node, std::move(msg));
4695}
4696
4697void PeerManagerImpl::ProcessGetCFCheckPt(CNode &node, Peer &peer,
4698 CDataStream &vRecv) {
4699 uint8_t filter_type_ser;
4700 BlockHash stop_hash;
4701
4702 vRecv >> filter_type_ser >> stop_hash;
4703
4704 const BlockFilterType filter_type =
4705 static_cast<BlockFilterType>(filter_type_ser);
4706
4707 const CBlockIndex *stop_index;
4708 BlockFilterIndex *filter_index;
4709 if (!PrepareBlockFilterRequest(
4710 node, peer, filter_type, /*start_height=*/0, stop_hash,
4711 /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
4712 stop_index, filter_index)) {
4713 return;
4714 }
4715
4716 std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
4717
4718 // Populate headers.
4719 const CBlockIndex *block_index = stop_index;
4720 for (int i = headers.size() - 1; i >= 0; i--) {
4721 int height = (i + 1) * CFCHECKPT_INTERVAL;
4722 block_index = block_index->GetAncestor(height);
4723
4724 if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
4725 LogPrint(BCLog::NET,
4726 "Failed to find block filter header in index: "
4727 "filter_type=%s, block_hash=%s\n",
4728 BlockFilterTypeName(filter_type),
4729 block_index->GetBlockHash().ToString());
4730 return;
4731 }
4732 }
4733
4734 CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
4735 .Make(NetMsgType::CFCHECKPT, filter_type_ser,
4736 stop_index->GetBlockHash(), headers);
4737 m_connman.PushMessage(&node, std::move(msg));
4738}
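// The checkpoint indexing above, in isolation (a sketch, not from this
// file). Assuming the BIP 157 interval of 1000 blocks, a stop block at
// height 3500 yields checkpoints at heights 1000, 2000 and 3000:
// headers.size() == 3500 / 1000 == 3, and entry i covers height
// (i + 1) * 1000.
#include <cstddef>
#include <vector>
namespace sketch {
std::vector<int> CheckpointHeights(int stop_height, int interval = 1000) {
    std::vector<int> heights(stop_height / interval);
    for (size_t i = 0; i < heights.size(); ++i) {
        heights[i] = static_cast<int>(i + 1) * interval;
    }
    return heights;
}
} // namespace sketch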
4739
4740bool IsAvalancheMessageType(const std::string &msg_type) {
4741 return msg_type == NetMsgType::AVAHELLO ||
4742 msg_type == NetMsgType::AVAPOLL ||
4743 msg_type == NetMsgType::AVARESPONSE ||
4744 msg_type == NetMsgType::AVAPROOF ||
4745 msg_type == NetMsgType::GETAVAADDR ||
4746 msg_type == NetMsgType::GETAVAPROOFS ||
4747 msg_type == NetMsgType::AVAPROOFS ||
4748 msg_type == NetMsgType::AVAPROOFSREQ;
4749}
4750
4751uint32_t
4752PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) const {
4754
4755 const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
4756
4757 // Unknown block.
4758 if (!pindex) {
4759 return -1;
4760 }
4761
4762 // Invalid block
4763 if (pindex->nStatus.isInvalid()) {
4764 return 1;
4765 }
4766
4767 // Parked block
4768 if (pindex->nStatus.isOnParkedChain()) {
4769 return 2;
4770 }
4771
4772 const CBlockIndex *pindexTip = m_chainman.ActiveChain().Tip();
4773 const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip);
4774
4775 // Active block.
4776 if (pindex == pindexFork) {
4777 return 0;
4778 }
4779
4780 // Fork block.
4781 if (pindexFork != pindexTip) {
4782 return 3;
4783 }
4784
4785 // Missing block data.
4786 if (!pindex->nStatus.hasData()) {
4787 return -2;
4788 }
4789
4790 // This block is built on top of the tip, we have the data, it
4791 // is pending connection or rejection.
4792 return -3;
4793};
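// Summary of the block vote codes returned above (non-negative codes are
// definitive answers, negative codes mean the node cannot decide yet):
//   0  accepted (on the active chain)
//   1  invalid
//   2  parked
//   3  fork (known block, not on the active chain's path)
//  -1  unknown block
//  -2  missing block data
//  -3  pending (built on our tip, awaiting connection or rejection)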
4794
4795uint32_t
4796PeerManagerImpl::GetAvalancheVoteForTx(const avalanche::Processor &avalanche,
4797 const TxId &id) const {
4798 // Recently confirmed
4799 if (WITH_LOCK(m_recent_confirmed_transactions_mutex,
4800 return m_recent_confirmed_transactions.contains(id))) {
4801 return 0;
4802 }
4803
4804 CTransactionRef mempool_tx;
4805 {
4806 LOCK(::cs_main);
4807
4808 // Invalid tx. m_recent_rejects needs cs_main
4809 if (m_recent_rejects.contains(id)) {
4810 return 1;
4811 }
4812
4813 LOCK(m_mempool.cs);
4814
4815 // Finalized
4816 if (m_mempool.isAvalancheFinalizedPreConsensus(id)) {
4817 return 0;
4818 }
4819
4820 // Accepted in mempool
4821 if (auto iter = m_mempool.GetIter(id)) {
4822 mempool_tx = (**iter)->GetSharedTx();
4823 } else {
4824 // Conflicting tx
4825 if (m_mempool.withConflicting(
4826 [&id](const TxConflicting &conflicting) {
4827 return conflicting.HaveTx(id);
4828 })) {
4829 return 2;
4830 }
4831
4832 // Orphan tx
4833 if (m_mempool.withOrphanage([&id](const TxOrphanage &orphanage) {
4834 return orphanage.HaveTx(id);
4835 })) {
4836 return -2;
4837 }
4838 }
4839 } // release cs_main and mempool.cs locks
4840
4841 // isPolled() accesses the vote records, and should be called with cs_main
4842 // released.
4843 // If the tx is in the mempool...
4844 if (mempool_tx) {
4845 // ... and in the polled list
4846 if (avalanche.isPolled(mempool_tx)) {
4847 return 0;
4848 }
4849
4850 // ... but not in the polled list
4851 return -3;
4852 }
4853
4854 // Unknown tx
4855 return -1;
4856};
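// Summary of the tx vote codes returned above: 0 accepted (recently
// confirmed, finalized by preconsensus, or in the mempool and polled);
// 1 invalid (recently rejected); 2 conflicting; -1 unknown;
// -2 orphan (missing inputs); -3 in the mempool but not yet polled.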
4857
4864 static uint32_t getAvalancheVoteForProof(const avalanche::Processor &avalanche,
4865 const avalanche::ProofId &id) {
4866 return avalanche.withPeerManager([&id](avalanche::PeerManager &pm) {
4867 // Rejected proof
4868 if (pm.isInvalid(id)) {
4869 return 1;
4870 }
4871
4872 // The proof is actively bound to a peer
4873 if (pm.isBoundToPeer(id)) {
4874 return 0;
4875 }
4876
4877 // Unknown proof
4878 if (!pm.exists(id)) {
4879 return -1;
4880 }
4881
4882 // Immature proof
4883 if (pm.isImmature(id)) {
4884 return 2;
4885 }
4886
4887 // Not immature, but in conflict with an actively bound proof
4888 if (pm.isInConflictingPool(id)) {
4889 return 3;
4890 }
4891
4892 // The proof is known, not rejected, not immature, not a conflict, but
4893 // for some reason unbound. This should not happen if the above pools
4894 // are managed correctly, but added for robustness.
4895 return -2;
4896 });
4897};
4898
4899void PeerManagerImpl::ProcessBlock(const Config &config, CNode &node,
4900 const std::shared_ptr<const CBlock> &block,
4901 bool force_processing,
4902 bool min_pow_checked) {
4903 bool new_block{false};
4904 m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked,
4905 &new_block, m_avalanche);
4906 if (new_block) {
4907 node.m_last_block_time = GetTime<std::chrono::seconds>();
4908 // In case this block came from a different peer than we requested
4909 // from, we can erase the block request now anyway (as we just stored
4910 // this block to disk).
4911 LOCK(cs_main);
4912 RemoveBlockRequest(block->GetHash(), std::nullopt);
4913 } else {
4914 LOCK(cs_main);
4915 mapBlockSource.erase(block->GetHash());
4916 }
4917}
4918
4919void PeerManagerImpl::ProcessMessage(
4920 const Config &config, CNode &pfrom, const std::string &msg_type,
4921 CDataStream &vRecv, const std::chrono::microseconds time_received,
4922 const std::atomic<bool> &interruptMsgProc) {
4923 AssertLockHeld(g_msgproc_mutex);
4924
4925 LogPrint(BCLog::NETDEBUG, "received: %s (%u bytes) peer=%d\n",
4926 SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
4927
4928 PeerRef peer = GetPeerRef(pfrom.GetId());
4929 if (peer == nullptr) {
4930 return;
4931 }
4932
4933 if (!m_avalanche && IsAvalancheMessageType(msg_type)) {
4934 LogPrint(BCLog::NET,
4935 "Avalanche is not initialized, ignoring %s message\n",
4936 msg_type);
4937 return;
4938 }
4939
4940 if (msg_type == NetMsgType::VERSION) {
4941 // Each connection can only send one version message
4942 if (pfrom.nVersion != 0) {
4943 LogPrint(BCLog::NET, "redundant version message from peer=%d\n",
4944 pfrom.GetId());
4945 return;
4946 }
4947
4948 int64_t nTime;
4949 CService addrMe;
4950 uint64_t nNonce = 1;
4951 ServiceFlags nServices;
4952 int nVersion;
4953 std::string cleanSubVer;
4954 int starting_height = -1;
4955 bool fRelay = true;
4956 uint64_t nExtraEntropy = 1;
4957
4958 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
4959 if (nTime < 0) {
4960 nTime = 0;
4961 }
4962 // Ignore the addrMe service bits sent by the peer
4963 vRecv.ignore(8);
4964 vRecv >> addrMe;
4965 if (!pfrom.IsInboundConn()) {
4966 m_addrman.SetServices(pfrom.addr, nServices);
4967 }
4968 if (pfrom.ExpectServicesFromConn() &&
4969 !HasAllDesirableServiceFlags(nServices)) {
4970 LogPrint(BCLog::NET,
4971 "peer=%d does not offer the expected services "
4972 "(%08x offered, %08x expected); disconnecting\n",
4973 pfrom.GetId(), nServices,
4974 GetDesirableServiceFlags(nServices));
4975 pfrom.fDisconnect = true;
4976 return;
4977 }
4978
4979 if (pfrom.IsAvalancheOutboundConnection() &&
4980 !(nServices & NODE_AVALANCHE)) {
4981 LogPrint(
4982 BCLog::NET,
4983 "peer=%d does not offer the avalanche service; disconnecting\n",
4984 pfrom.GetId());
4985 pfrom.fDisconnect = true;
4986 return;
4987 }
4988
4989 if (nVersion < MIN_PEER_PROTO_VERSION) {
4990 // disconnect from peers older than this proto version
4991 LogPrint(BCLog::NET,
4992 "peer=%d using obsolete version %i; disconnecting\n",
4993 pfrom.GetId(), nVersion);
4994 pfrom.fDisconnect = true;
4995 return;
4996 }
4997
4998 if (!vRecv.empty()) {
4999 // The version message includes information about the sending node
5000 // which we don't use:
5001 // - 8 bytes (service bits)
5002 // - 16 bytes (ipv6 address)
5003 // - 2 bytes (port)
5004 vRecv.ignore(26);
5005 vRecv >> nNonce;
5006 }
5007 if (!vRecv.empty()) {
5008 std::string strSubVer;
5009 vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
5010 cleanSubVer = SanitizeString(strSubVer);
5011 }
5012 if (!vRecv.empty()) {
5013 vRecv >> starting_height;
5014 }
5015 if (!vRecv.empty()) {
5016 vRecv >> fRelay;
5017 }
5018 if (!vRecv.empty()) {
5019 vRecv >> nExtraEntropy;
5020 }
5021 // Disconnect if we connected to ourself
5022 if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
5023 LogPrintf("connected to self at %s, disconnecting\n",
5024 pfrom.addr.ToString());
5025 pfrom.fDisconnect = true;
5026 return;
5027 }
5028
5029 if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
5030 SeenLocal(addrMe);
5031 }
5032
5033 // Inbound peers send us their version message when they connect.
5034 // We send our version message in response.
5035 if (pfrom.IsInboundConn()) {
5036 PushNodeVersion(config, pfrom, *peer);
5037 }
5038
5039 // Change version
5040 const int greatest_common_version =
5041 std::min(nVersion, PROTOCOL_VERSION);
5042 pfrom.SetCommonVersion(greatest_common_version);
5043 pfrom.nVersion = nVersion;
5044
5045 const CNetMsgMaker msg_maker(greatest_common_version);
5046
5047 m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
5048
5049 // Signal ADDRv2 support (BIP155).
5050 m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
5051
5052 pfrom.m_has_all_wanted_services =
5053 HasAllDesirableServiceFlags(nServices);
5054 peer->m_their_services = nServices;
5055 pfrom.SetAddrLocal(addrMe);
5056 {
5057 LOCK(pfrom.m_subver_mutex);
5058 pfrom.cleanSubVer = cleanSubVer;
5059 }
5060 peer->m_starting_height = starting_height;
5061
5062 // Only initialize the m_tx_relay data structure if:
5063 // - this isn't an outbound block-relay-only connection; and
5064 // - this isn't an outbound feeler connection, and
5065 // - fRelay=true or we're offering NODE_BLOOM to this peer
5066 // (NODE_BLOOM means that the peer may turn on tx relay later)
5067 if (!pfrom.IsBlockOnlyConn() && !pfrom.IsFeelerConn() &&
5068 (fRelay || (peer->m_our_services & NODE_BLOOM))) {
5069 auto *const tx_relay = peer->SetTxRelay();
5070 {
5071 LOCK(tx_relay->m_bloom_filter_mutex);
5072 // set to true after we get the first filter* message
5073 tx_relay->m_relay_txs = fRelay;
5074 }
5075 if (fRelay) {
5076 pfrom.m_relays_txs = true;
5077 }
5078 }
5079
5080 pfrom.nRemoteHostNonce = nNonce;
5081 pfrom.nRemoteExtraEntropy = nExtraEntropy;
5082
5083 // Potentially mark this peer as a preferred download peer.
5084 {
5085 LOCK(cs_main);
5086 CNodeState *state = State(pfrom.GetId());
5087 state->fPreferredDownload =
5088 (!pfrom.IsInboundConn() ||
5089 pfrom.HasPermission(NetPermissionFlags::NoBan)) &&
5090 !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
5091 m_num_preferred_download_peers += state->fPreferredDownload;
5092 }
5093
5094 // Attempt to initialize address relay for outbound peers and use result
5095 // to decide whether to send GETADDR, so that we don't send it to
5096 // inbound or outbound block-relay-only peers.
5097 bool send_getaddr{false};
5098 if (!pfrom.IsInboundConn()) {
5099 send_getaddr = SetupAddressRelay(pfrom, *peer);
5100 }
5101 if (send_getaddr) {
5102 // Do a one-time address fetch to help populate/update our addrman.
5103 // If we're starting up for the first time, our addrman may be
5104 // pretty empty, so this mechanism is important to help us connect
5105 // to the network.
5106 // We skip this for block-relay-only peers. We want to avoid
5107 // potentially leaking addr information and we do not want to
5108 // indicate to the peer that we will participate in addr relay.
5109 m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version)
5110 .Make(NetMsgType::GETADDR));
5111 peer->m_getaddr_sent = true;
5112 // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND
5113 // addresses in response (bypassing the
5114 // MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
5115 WITH_LOCK(peer->m_addr_token_bucket_mutex,
5116 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
5117 }
5118
5119 if (!pfrom.IsInboundConn()) {
5120 // For non-inbound connections, we update the addrman to record
5121 // connection success so that addrman will have an up-to-date
5122 // notion of which peers are online and available.
5123 //
5124 // While we strive to not leak information about block-relay-only
5125 // connections via the addrman, not moving an address to the tried
5126 // table is also potentially detrimental because new-table entries
5127 // are subject to eviction in the event of addrman collisions. We
5128 // mitigate the information-leak by never calling
5129 // AddrMan::Connected() on block-relay-only peers; see
5130 // FinalizeNode().
5131 //
5132 // This moves an address from New to Tried table in Addrman,
5133 // resolves tried-table collisions, etc.
5134 m_addrman.Good(pfrom.addr);
5135 }
5136
5137 std::string remoteAddr;
5138 if (fLogIPs) {
5139 remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
5140 }
5141
5142 LogPrint(BCLog::NET,
5143 "receive version message: [%s] %s: version %d, blocks=%d, "
5144 "us=%s, txrelay=%d, peer=%d%s\n",
5145 pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion,
5146 peer->m_starting_height, addrMe.ToString(), fRelay,
5147 pfrom.GetId(), remoteAddr);
5148
5149 int64_t currentTime = GetTime();
5150 int64_t nTimeOffset = nTime - currentTime;
5151 pfrom.nTimeOffset = nTimeOffset;
5152 if (nTime < int64_t(m_chainparams.GenesisBlock().nTime)) {
5153 // Ignore time offsets that are improbable (before the Genesis
5154 // block) and may underflow our adjusted time.
5155 Misbehaving(*peer, "Ignoring invalid timestamp in version message");
5156 } else if (!pfrom.IsInboundConn()) {
5157 // Don't use timedata samples from inbound peers to make it
5158 // harder for others to tamper with our adjusted time.
5159 AddTimeData(pfrom.addr, nTimeOffset);
5160 }
5161
5162 // Feeler connections exist only to verify if address is online.
5163 if (pfrom.IsFeelerConn()) {
5164 LogPrint(BCLog::NET,
5165 "feeler connection completed peer=%d; disconnecting\n",
5166 pfrom.GetId());
5167 pfrom.fDisconnect = true;
5168 }
5169 return;
5170 }
5171
5172 if (pfrom.nVersion == 0) {
5173 // Must have a version message before anything else
5174 Misbehaving(*peer, "non-version message before version handshake");
5175 return;
5176 }
5177
5178 // At this point, the outgoing message serialization version can't change.
5179 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
5180
5181 if (msg_type == NetMsgType::VERACK) {
5182 if (pfrom.fSuccessfullyConnected) {
5183 LogPrint(BCLog::NET,
5184 "ignoring redundant verack message from peer=%d\n",
5185 pfrom.GetId());
5186 return;
5187 }
5188
5189 if (!pfrom.IsInboundConn()) {
5190 LogPrintf(
5191 "New outbound peer connected: version: %d, blocks=%d, "
5192 "peer=%d%s (%s)\n",
5193 pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(),
5194 (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString())
5195 : ""),
5196 pfrom.ConnectionTypeAsString());
5197 }
5198
5199 if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
5200 // Tell our peer we are willing to provide version 1
5201 // cmpctblocks. However, we do not request new block announcements
5202 // using cmpctblock messages. We send this to non-NODE NETWORK peers
5203 // as well, because they may wish to request compact blocks from us.
5204 m_connman.PushMessage(
5205 &pfrom,
5206 msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false,
5207 /*version=*/CMPCTBLOCKS_VERSION));
5208 }
5209
5210 if (m_avalanche) {
5211 if (m_avalanche->sendHello(&pfrom)) {
5212 auto localProof = m_avalanche->getLocalProof();
5213
5214 if (localProof) {
5215 AddKnownProof(*peer, localProof->getId());
5216 // Add our proof id to the list or the recently announced
5217 // proof INVs to this peer. This is used for filtering which
5218 // INV can be requested for download.
5219 peer->m_proof_relay->m_recently_announced_proofs.insert(
5220 localProof->getId());
5221 }
5222 }
5223 }
5224
5225 if (auto tx_relay = peer->GetTxRelay()) {
5226 // `TxRelay::m_tx_inventory_to_send` must be empty before the
5227 // version handshake is completed as
5228 // `TxRelay::m_next_inv_send_time` is first initialised in
5229 // `SendMessages` after the verack is received. Any transactions
5230 // received during the version handshake would otherwise
5231 // immediately be advertised without random delay, potentially
5232 // leaking the time of arrival to a spy.
5233 Assume(WITH_LOCK(tx_relay->m_tx_inventory_mutex,
5234 return tx_relay->m_tx_inventory_to_send.empty() &&
5235 tx_relay->m_next_inv_send_time == 0s));
5236 }
5237
5238 pfrom.fSuccessfullyConnected = true;
5239 return;
5240 }
5241
5242 if (!pfrom.fSuccessfullyConnected) {
5243 // Must have a verack message before anything else
5244 Misbehaving(*peer, "non-verack message before version handshake");
5245 return;
5246 }
5247
5248 if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
5249 int stream_version = vRecv.GetVersion();
5250 if (msg_type == NetMsgType::ADDRV2) {
5251 // Add ADDRV2_FORMAT to the version so that the CNetAddr and
5252 // CAddress unserialize methods know that an address in v2 format is
5253 // coming.
5254 stream_version |= ADDRV2_FORMAT;
5255 }
5256
5257 OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
5258 std::vector<CAddress> vAddr;
5259
5260 s >> vAddr;
5261
5262 if (!SetupAddressRelay(pfrom, *peer)) {
5263 LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n",
5264 msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
5265 return;
5266 }
5267
5268 if (vAddr.size() > m_opts.max_addr_to_send) {
5269 Misbehaving(*peer, strprintf("%s message size = %u", msg_type,
5270 vAddr.size()));
5271 return;
5272 }
5273
5274 // Store the new addresses
5275 std::vector<CAddress> vAddrOk;
5276 const auto current_a_time{Now<NodeSeconds>()};
5277
5278 // Update/increment addr rate limiting bucket.
5279 const auto current_time = GetTime<std::chrono::microseconds>();
5280 {
5281 LOCK(peer->m_addr_token_bucket_mutex);
5282 if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
5283 // Don't increment bucket if it's already full
5284 const auto time_diff =
5285 std::max(current_time - peer->m_addr_token_timestamp, 0us);
5286 const double increment =
5287 Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
5288 peer->m_addr_token_bucket =
5289 std::min<double>(peer->m_addr_token_bucket + increment,
5290 MAX_ADDR_PROCESSING_TOKEN_BUCKET);
5291 }
5292 }
5293 peer->m_addr_token_timestamp = current_time;
5294
5295 const bool rate_limited =
5296 !pfrom.HasPermission(NetPermissionFlags::Addr);
5297 uint64_t num_proc = 0;
5298 uint64_t num_rate_limit = 0;
5299 Shuffle(vAddr.begin(), vAddr.end(), m_rng);
5300 for (CAddress &addr : vAddr) {
5301 if (interruptMsgProc) {
5302 return;
5303 }
5304
5305 {
5306 LOCK(peer->m_addr_token_bucket_mutex);
5307 // Apply rate limiting.
5308 if (peer->m_addr_token_bucket < 1.0) {
5309 if (rate_limited) {
5310 ++num_rate_limit;
5311 continue;
5312 }
5313 } else {
5314 peer->m_addr_token_bucket -= 1.0;
5315 }
5316 }
5317
5318 // We only bother storing full nodes, though this may include things
5319 // which we would not make an outbound connection to, in part
5320 // because we may make feeler connections to them.
5321 if (!MayHaveUsefulAddressDB(addr.nServices) &&
5322 !HasAllDesirableServiceFlags(addr.nServices)) {
5323 continue;
5324 }
5325
5326 if (addr.nTime <= NodeSeconds{100000000s} ||
5327 addr.nTime > current_a_time + 10min) {
5328 addr.nTime = current_a_time - 5 * 24h;
5329 }
5330 AddAddressKnown(*peer, addr);
5331 if (m_banman &&
5332 (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
5333 // Do not process banned/discouraged addresses beyond
5334 // remembering we received them
5335 continue;
5336 }
5337 ++num_proc;
5338 bool fReachable = IsReachable(addr);
5339 if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
5340 vAddr.size() <= 10 && addr.IsRoutable()) {
5341 // Relay to a limited number of other nodes
5342 RelayAddress(pfrom.GetId(), addr, fReachable);
5343 }
5344 // Do not store addresses outside our network
5345 if (fReachable) {
5346 vAddrOk.push_back(addr);
5347 }
5348 }
5349 peer->m_addr_processed += num_proc;
5350 peer->m_addr_rate_limited += num_rate_limit;
5351 LogPrint(BCLog::NET,
5352 "Received addr: %u addresses (%u processed, %u rate-limited) "
5353 "from peer=%d\n",
5354 vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
5355
5356 m_addrman.Add(vAddrOk, pfrom.addr, 2h);
5357 if (vAddr.size() < 1000) {
5358 peer->m_getaddr_sent = false;
5359 }
5360
5361 // AddrFetch: Require multiple addresses to avoid disconnecting on
5362 // self-announcements
5363 if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
5364 LogPrint(BCLog::NET,
5365 "addrfetch connection completed peer=%d; disconnecting\n",
5366 pfrom.GetId());
5367 pfrom.fDisconnect = true;
5368 }
5369 return;
5370 }
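// The addr rate limiter above is a token bucket (a sketch, not from this
// file): tokens accrue with elapsed time up to a burst cap, and each
// processed address spends one token. The rate and cap below are
// hypothetical stand-ins for MAX_ADDR_RATE_PER_SECOND and
// MAX_ADDR_PROCESSING_TOKEN_BUCKET.
#include <algorithm>
namespace sketch {
struct TokenBucket {
    double tokens = 0.0;
    double rate_per_s = 0.1; // hypothetical refill rate
    double cap = 1000.0;     // hypothetical burst size

    void Refill(double elapsed_s) {
        if (tokens < cap) {
            tokens = std::min(tokens + elapsed_s * rate_per_s, cap);
        }
    }
    bool TrySpend() {
        if (tokens < 1.0) {
            return false; // rate limited: skip this address
        }
        tokens -= 1.0;
        return true;
    }
};
} // namespace sketch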
5371
5372 if (msg_type == NetMsgType::SENDADDRV2) {
5373 peer->m_wants_addrv2 = true;
5374 return;
5375 }
5376
5377 if (msg_type == NetMsgType::SENDHEADERS) {
5378 peer->m_prefers_headers = true;
5379 return;
5380 }
5381
5382 if (msg_type == NetMsgType::SENDCMPCT) {
5383 bool sendcmpct_hb{false};
5384 uint64_t sendcmpct_version{0};
5385 vRecv >> sendcmpct_hb >> sendcmpct_version;
5386
5387 if (sendcmpct_version != CMPCTBLOCKS_VERSION) {
5388 return;
5389 }
5390
5391 LOCK(cs_main);
5392 CNodeState *nodestate = State(pfrom.GetId());
5393 nodestate->m_provides_cmpctblocks = true;
5394 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
5395 // save whether peer selects us as BIP152 high-bandwidth peer
5396 // (receiving sendcmpct(1) signals high-bandwidth,
5397 // sendcmpct(0) low-bandwidth)
5398 pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
5399 return;
5400 }
5401
5402 if (msg_type == NetMsgType::INV) {
5403 std::vector<CInv> vInv;
5404 vRecv >> vInv;
5405 if (vInv.size() > MAX_INV_SZ) {
5406 Misbehaving(*peer, strprintf("inv message size = %u", vInv.size()));
5407 return;
5408 }
5409
5410 const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
5411
5412 const auto current_time{GetTime<std::chrono::microseconds>()};
5413 std::optional<BlockHash> best_block;
5414
5415 auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
5416 LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(),
5417 fAlreadyHave ? "have" : "new", pfrom.GetId());
5418 };
5419
5420 for (CInv &inv : vInv) {
5421 if (interruptMsgProc) {
5422 return;
5423 }
5424
5425 if (inv.IsMsgStakeContender()) {
5426 // Ignore invs with stake contenders. This type is only used for
5427 // polling.
5428 continue;
5429 }
5430
5431 if (inv.IsMsgBlk()) {
5432 LOCK(cs_main);
5433 const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
5434 logInv(inv, fAlreadyHave);
5435
5436 BlockHash hash{inv.hash};
5437 UpdateBlockAvailability(pfrom.GetId(), hash);
5438 if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() &&
5439 !IsBlockRequested(hash)) {
5440 // Headers-first is the primary method of announcement on
5441 // the network. If a node fell back to sending blocks by
5442 // inv, it may be for a re-org, or because we haven't
5443 // completed initial headers sync. The final block hash
5444 // provided should be the highest, so send a getheaders and
5445 // then fetch the blocks we need to catch up.
5446 best_block = std::move(hash);
5447 }
5448
5449 continue;
5450 }
5451
5452 if (inv.IsMsgProof()) {
5453 if (!m_avalanche) {
5454 continue;
5455 }
5456 const avalanche::ProofId proofid(inv.hash);
5457 const bool fAlreadyHave = AlreadyHaveProof(proofid);
5458 logInv(inv, fAlreadyHave);
5459 AddKnownProof(*peer, proofid);
5460
5461 if (!fAlreadyHave && m_avalanche &&
5462 !m_chainman.IsInitialBlockDownload()) {
5463 const bool preferred = isPreferredDownloadPeer(pfrom);
5464
5465 LOCK(cs_proofrequest);
5466 AddProofAnnouncement(pfrom, proofid, current_time,
5467 preferred);
5468 }
5469 continue;
5470 }
5471
5472 if (inv.IsMsgTx()) {
5473 LOCK(cs_main);
5474 const TxId txid(inv.hash);
5475 const bool fAlreadyHave =
5476 AlreadyHaveTx(txid, /*include_reconsiderable=*/true);
5477 logInv(inv, fAlreadyHave);
5478
5479 AddKnownTx(*peer, txid);
5480 if (reject_tx_invs) {
5481 LogPrint(BCLog::NET,
5482 "transaction (%s) inv sent in violation of "
5483 "protocol, disconnecting peer=%d\n",
5484 txid.ToString(), pfrom.GetId());
5485 pfrom.fDisconnect = true;
5486 return;
5487 } else if (!fAlreadyHave &&
5488 !m_chainman.IsInitialBlockDownload()) {
5489 AddTxAnnouncement(pfrom, txid, current_time);
5490 }
5491
5492 continue;
5493 }
5494
5495 LogPrint(BCLog::NET,
5496 "Unknown inv type \"%s\" received from peer=%d\n",
5497 inv.ToString(), pfrom.GetId());
5498 }
5499
5500 if (best_block) {
5501 // If we haven't started initial headers-sync with this peer, then
5502 // consider sending a getheaders now. On initial startup, there's a
5503 // reliability vs bandwidth tradeoff, where we are only trying to do
5504 // initial headers sync with one peer at a time, with a long
5505 // timeout (at which point, if the sync hasn't completed, we will
5506 // disconnect the peer and then choose another). In the meantime,
5507 // as new blocks are found, we are willing to add one new peer per
5508 // block to sync with as well, to sync quicker in the case where
5509 // our initial peer is unresponsive (but less bandwidth than we'd
5510 // use if we turned on sync with all peers).
5511 LOCK(::cs_main);
5512 CNodeState &state{*Assert(State(pfrom.GetId()))};
5513 if (state.fSyncStarted ||
5514 (!peer->m_inv_triggered_getheaders_before_sync &&
5515 *best_block != m_last_block_inv_triggering_headers_sync)) {
5516 if (MaybeSendGetHeaders(
5517 pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
5518 LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
5519 m_chainman.m_best_header->nHeight,
5520 best_block->ToString(), pfrom.GetId());
5521 }
5522 if (!state.fSyncStarted) {
5523 peer->m_inv_triggered_getheaders_before_sync = true;
5524 // Update the last block hash that triggered a new headers
5525 // sync, so that we don't turn on headers sync with more
5526 // than 1 new peer every new block.
5527 m_last_block_inv_triggering_headers_sync = *best_block;
5528 }
5529 }
5530 }
5531
5532 return;
5533 }
5534
5535 if (msg_type == NetMsgType::GETDATA) {
5536 std::vector<CInv> vInv;
5537 vRecv >> vInv;
5538 if (vInv.size() > MAX_INV_SZ) {
5539 Misbehaving(*peer,
5540 strprintf("getdata message size = %u", vInv.size()));
5541 return;
5542 }
5543
5544 LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
5545 vInv.size(), pfrom.GetId());
5546
5547 if (vInv.size() > 0) {
5548 LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
5549 vInv[0].ToString(), pfrom.GetId());
5550 }
5551
5552 {
5553 LOCK(peer->m_getdata_requests_mutex);
5554 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
5555 vInv.begin(), vInv.end());
5556 ProcessGetData(config, pfrom, *peer, interruptMsgProc);
5557 }
5558
5559 return;
5560 }
5561
5562 if (msg_type == NetMsgType::GETBLOCKS) {
5563 CBlockLocator locator;
5564 uint256 hashStop;
5565 vRecv >> locator >> hashStop;
5566
5567 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
5568 LogPrint(BCLog::NET,
5569 "getblocks locator size %lld > %d, disconnect peer=%d\n",
5570 locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
5571 pfrom.fDisconnect = true;
5572 return;
5573 }
5574
5575 // We might have announced the currently-being-connected tip using a
5576 // compact block, which resulted in the peer sending a getblocks
5577 // request, which we would otherwise respond to without the new block.
5578 // To avoid this situation we simply verify that we are on our best
5579 // known chain now. This is super overkill, but we handle it better
5580 // for getheaders requests, and there are no known nodes which support
5581 // compact blocks but still use getblocks to request blocks.
5582 {
5583 std::shared_ptr<const CBlock> a_recent_block;
5584 {
5585 LOCK(m_most_recent_block_mutex);
5586 a_recent_block = m_most_recent_block;
5587 }
5588 BlockValidationState state;
5589 if (!m_chainman.ActiveChainstate().ActivateBestChain(
5590 state, a_recent_block, m_avalanche)) {
5591 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
5592 state.ToString());
5593 }
5594 }
5595
5596 LOCK(cs_main);
5597
5598 // Find the last block the caller has in the main chain
5599 const CBlockIndex *pindex =
5600 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
5601
5602 // Send the rest of the chain
5603 if (pindex) {
5604 pindex = m_chainman.ActiveChain().Next(pindex);
5605 }
5606 int nLimit = 500;
5607 LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
5608 (pindex ? pindex->nHeight : -1),
5609 hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
5610 pfrom.GetId());
5611 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5612 if (pindex->GetBlockHash() == hashStop) {
5613 LogPrint(BCLog::NET, " getblocks stopping at %d %s\n",
5614 pindex->nHeight, pindex->GetBlockHash().ToString());
5615 break;
5616 }
5617 // If pruning, don't inv blocks unless we have on disk and are
5618 // likely to still have for some reasonable time window (1 hour)
5619 // that block relay might require.
5620 const int nPrunedBlocksLikelyToHave =
5621 MIN_BLOCKS_TO_KEEP -
5622 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
5623 if (m_chainman.m_blockman.IsPruneMode() &&
5624 (!pindex->nStatus.hasData() ||
5625 pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight -
5626 nPrunedBlocksLikelyToHave)) {
5627 LogPrint(
5628 BCLog::NET,
5629 " getblocks stopping, pruned or too old block at %d %s\n",
5630 pindex->nHeight, pindex->GetBlockHash().ToString());
5631 break;
5632 }
5633 WITH_LOCK(
5634 peer->m_block_inv_mutex,
5635 peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
5636 if (--nLimit <= 0) {
5637 // When this block is requested, we'll send an inv that'll
5638 // trigger the peer to getblocks the next batch of inventory.
5639 LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n",
5640 pindex->nHeight, pindex->GetBlockHash().ToString());
5641 WITH_LOCK(peer->m_block_inv_mutex, {
5642 peer->m_continuation_block = pindex->GetBlockHash();
5643 });
5644 break;
5645 }
5646 }
5647 return;
5648 }
5649
5650 if (msg_type == NetMsgType::GETBLOCKTXN) {
5651 BlockTransactionsRequest req;
5652 vRecv >> req;
5653
5654 std::shared_ptr<const CBlock> recent_block;
5655 {
5656 LOCK(m_most_recent_block_mutex);
5657 if (m_most_recent_block_hash == req.blockhash) {
5658 recent_block = m_most_recent_block;
5659 }
5660 // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
5661 }
5662 if (recent_block) {
5663 SendBlockTransactions(pfrom, *peer, *recent_block, req);
5664 return;
5665 }
5666
5667 FlatFilePos block_pos{};
5668 {
5669 LOCK(cs_main);
5670
5671 const CBlockIndex *pindex =
5672 m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
5673 if (!pindex || !pindex->nStatus.hasData()) {
5674 LogPrint(
5675 BCLog::NET,
5676 "Peer %d sent us a getblocktxn for a block we don't have\n",
5677 pfrom.GetId());
5678 return;
5679 }
5680
5681 if (pindex->nHeight >=
5682 m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
5683 block_pos = pindex->GetBlockPos();
5684 }
5685 }
5686
5687 if (!block_pos.IsNull()) {
5688 CBlock block;
5689 const bool ret{
5690 m_chainman.m_blockman.ReadBlockFromDisk(block, block_pos)};
5691 // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get
5692 // pruned after we release cs_main above, so this read should never
5693 // fail.
5694 assert(ret);
5695
5696 SendBlockTransactions(pfrom, *peer, block, req);
5697 return;
5698 }
5699
5700 // If an older block is requested (should never happen in practice,
5701 // but can happen in tests) send a block response instead of a
5702 // blocktxn response. Sending a full block response instead of a
5703 // small blocktxn response is preferable in the case where a peer
5704 // might maliciously send lots of getblocktxn requests to trigger
5705 // expensive disk reads, because it will require the peer to
5706 // actually receive all the data read from disk over the network.
5707 LogPrint(BCLog::NET,
5708 "Peer %d sent us a getblocktxn for a block > %i deep\n",
5709 pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
5710 CInv inv;
5711 inv.type = MSG_BLOCK;
5712 inv.hash = req.blockhash;
5713 WITH_LOCK(peer->m_getdata_requests_mutex,
5714 peer->m_getdata_requests.push_back(inv));
5715 // The message processing loop will go around again (without pausing)
5716 // and we'll respond then (without cs_main)
5717 return;
5718 }
5719
5720 if (msg_type == NetMsgType::GETHEADERS) {
5721 CBlockLocator locator;
5722 BlockHash hashStop;
5723 vRecv >> locator >> hashStop;
5724
5725 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
5726 LogPrint(BCLog::NET,
5727 "getheaders locator size %lld > %d, disconnect peer=%d\n",
5728 locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
5729 pfrom.fDisconnect = true;
5730 return;
5731 }
5732
5733 if (m_chainman.m_blockman.LoadingBlocks()) {
5734 LogPrint(
5735 BCLog::NET,
5736 "Ignoring getheaders from peer=%d while importing/reindexing\n",
5737 pfrom.GetId());
5738 return;
5739 }
5740
5741 LOCK(cs_main);
5742
5743 // Note that if we were to be on a chain that forks from the
5744 // checkpointed chain, then serving those headers to a peer that has
5745 // seen the checkpointed chain would cause that peer to disconnect us.
5746 // Requiring that our chainwork exceed the minimum chainwork is a
5747 // protection against being fed a bogus chain when we started up for
5748 // the first time and getting partitioned off the honest network for
5749 // serving that chain to others.
5750 if (m_chainman.ActiveTip() == nullptr ||
5751 (m_chainman.ActiveTip()->nChainWork <
5752 m_chainman.MinimumChainWork() &&
5753 !pfrom.HasPermission(NetPermissionFlags::Download))) {
5754 LogPrint(BCLog::NET,
5755 "Ignoring getheaders from peer=%d because active chain "
5756 "has too little work; sending empty response\n",
5757 pfrom.GetId());
5758 // Just respond with an empty headers message, to tell the peer to
5759 // go away but not treat us as unresponsive.
5760 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS,
5761 std::vector<CBlock>()));
5762 return;
5763 }
5764
5765 CNodeState *nodestate = State(pfrom.GetId());
5766 const CBlockIndex *pindex = nullptr;
5767 if (locator.IsNull()) {
5768 // If locator is null, return the hashStop block
5769 pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
5770 if (!pindex) {
5771 return;
5772 }
5773
5774 if (!BlockRequestAllowed(pindex)) {
5775 LogPrint(BCLog::NET,
5776 "%s: ignoring request from peer=%i for old block "
5777 "header that isn't in the main chain\n",
5778 __func__, pfrom.GetId());
5779 return;
5780 }
5781 } else {
5782 // Find the last block the caller has in the main chain
5783 pindex =
5784 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
5785 if (pindex) {
5786 pindex = m_chainman.ActiveChain().Next(pindex);
5787 }
5788 }
5789
5790 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
5791 // count at the end
5792 std::vector<CBlock> vHeaders;
5793 int nLimit = MAX_HEADERS_RESULTS;
5794 LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
5795 (pindex ? pindex->nHeight : -1),
5796 hashStop.IsNull() ? "end" : hashStop.ToString(),
5797 pfrom.GetId());
5798 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5799 vHeaders.push_back(pindex->GetBlockHeader());
5800 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
5801 break;
5802 }
5803 }
5804 // pindex can be nullptr either if we sent
5805 // m_chainman.ActiveChain().Tip() OR if our peer has
5806 // m_chainman.ActiveChain().Tip() (and thus we are sending an empty
5807 // headers message). In both cases it's safe to update
5808 // pindexBestHeaderSent to be our tip.
5809 //
5810 // It is important that we simply reset the BestHeaderSent value here,
5811 // and not max(BestHeaderSent, newHeaderSent). We might have announced
5812 // the currently-being-connected tip using a compact block, which
5813 // resulted in the peer sending a headers request, which we respond to
5814 // without the new block. By resetting the BestHeaderSent, we ensure we
5815 // will re-announce the new block via headers (or compact blocks again)
5816 // in the SendMessages logic.
5817 nodestate->pindexBestHeaderSent =
5818 pindex ? pindex : m_chainman.ActiveChain().Tip();
5819 m_connman.PushMessage(&pfrom,
5820 msgMaker.Make(NetMsgType::HEADERS, vHeaders));
5821 return;
5822 }
5823
5824 if (msg_type == NetMsgType::TX) {
5825 if (RejectIncomingTxs(pfrom)) {
5826 LogPrint(BCLog::NET,
5827 "transaction sent in violation of protocol peer=%d\n",
5828 pfrom.GetId());
5829 pfrom.fDisconnect = true;
5830 return;
5831 }
5832
5833 // Stop processing the transaction early if we are still in IBD since we
5834 // don't have enough information to validate it yet. Sending unsolicited
5835 // transactions is not considered a protocol violation, so don't punish
5836 // the peer.
5837 if (m_chainman.IsInitialBlockDownload()) {
5838 return;
5839 }
5840
5841 CTransactionRef ptx;
5842 vRecv >> ptx;
5843 const CTransaction &tx = *ptx;
5844 const TxId &txid = tx.GetId();
5845 AddKnownTx(*peer, txid);
5846
5847 {
5848 LOCK(cs_main);
5849
5850 m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
5851
5852 if (AlreadyHaveTx(txid, /*include_reconsiderable=*/true)) {
5853 if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) {
5854 // Always relay transactions received from peers with
5855 // forcerelay permission, even if they were already in the
5856 // mempool, allowing the node to function as a gateway for
5857 // nodes hidden behind it.
5858 if (!m_mempool.exists(tx.GetId())) {
5859 LogPrintf(
5860 "Not relaying non-mempool transaction %s from "
5861 "forcerelay peer=%d\n",
5862 tx.GetId().ToString(), pfrom.GetId());
5863 } else {
5864 LogPrintf("Force relaying tx %s from peer=%d\n",
5865 tx.GetId().ToString(), pfrom.GetId());
5866 RelayTransaction(tx.GetId());
5867 }
5868 }
5869
5870 if (m_recent_rejects_package_reconsiderable.contains(txid)) {
5871 // When a transaction is already in
5872 // m_recent_rejects_package_reconsiderable, we shouldn't
5873 // submit it by itself again. However, look for a matching
5874 // child in the orphanage, as it is possible that they
5875 // succeed as a package.
5876 LogPrint(
5877 BCLog::TXPACKAGES,
5878 "found tx %s in reconsiderable rejects, looking for "
5879 "child in orphanage\n",
5880 txid.ToString());
5881 if (auto package_to_validate{
5882 Find1P1CPackage(ptx, pfrom.GetId())}) {
5883 const auto package_result{ProcessNewPackage(
5884 m_chainman.ActiveChainstate(), m_mempool,
5885 package_to_validate->m_txns,
5886 /*test_accept=*/false)};
5887 LogPrint(BCLog::TXPACKAGES,
5888 "package evaluation for %s: %s (%s)\n",
5889 package_to_validate->ToString(),
5890 package_result.m_state.IsValid()
5891 ? "package accepted"
5892 : "package rejected",
5893 package_result.m_state.ToString());
5894 ProcessPackageResult(package_to_validate.value(),
5895 package_result);
5896 }
5897 }
5898 // If a tx is detected by m_recent_rejects it is ignored.
5899 // Because we haven't submitted the tx to our mempool, we won't
5900 // have computed a DoS score for it or determined exactly why we
5901 // consider it invalid.
5902 //
5903 // This means we won't penalize any peer subsequently relaying a
5904 // DoSy tx (even if we penalized the first peer who gave it to
5905 // us) because we have to account for m_recent_rejects showing
5906 // false positives. In other words, we shouldn't penalize a peer
5907 // if we aren't *sure* they submitted a DoSy tx.
5908 //
5909 // Note that m_recent_rejects doesn't just record DoSy or
5910 // invalid transactions, but any tx not accepted by the mempool,
5911 // which may be due to node policy (vs. consensus). So we can't
5912 // blanket penalize a peer simply for relaying a tx that our
5913 // m_recent_rejects has caught, regardless of false positives.
5914 return;
5915 }
5916
5917 const MempoolAcceptResult result =
5918 m_chainman.ProcessTransaction(ptx);
5919 const TxValidationState &state = result.m_state;
5920
5921 if (result.m_result_type ==
5922 MempoolAcceptResult::ResultType::VALID) {
5923 ProcessValidTx(pfrom.GetId(), ptx);
5924 pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
5925 } else if (state.GetResult() ==
5926 TxValidationResult::TX_MISSING_INPUTS) {
5927 // It may be the case that the orphan's parents have all been
5928 // rejected.
5929 bool fRejectedParents = false;
5930
5931 // Deduplicate parent txids, so that we don't have to loop over
5932 // the same parent txid more than once down below.
5933 std::vector<TxId> unique_parents;
5934 unique_parents.reserve(tx.vin.size());
5935 for (const CTxIn &txin : tx.vin) {
5936 // We start with all parents, and then remove duplicates
5937 // below.
5938 unique_parents.push_back(txin.prevout.GetTxId());
5939 }
5940 std::sort(unique_parents.begin(), unique_parents.end());
5941 unique_parents.erase(
5942 std::unique(unique_parents.begin(), unique_parents.end()),
5943 unique_parents.end());
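// Illustrative sketch (not from the original source): the sort +
// std::unique idiom above collapses repeated prevout txids. A child
// spending three outputs of parent A and one output of parent B starts
// from {A, A, A, B} and ends with {A, B}, so each missing parent is
// announced and requested at most once below.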
5944
5945 // Distinguish between parents in m_recent_rejects and
5946 // m_recent_rejects_package_reconsiderable. We can tolerate
5947 // having up to 1 parent in
5948 // m_recent_rejects_package_reconsiderable since we submit 1p1c
5949 // packages. However, fail immediately if any are in
5950 // m_recent_rejects.
5951 std::optional<TxId> rejected_parent_reconsiderable;
5952 for (const TxId &parent_txid : unique_parents) {
5953 if (m_recent_rejects.contains(parent_txid)) {
5954 fRejectedParents = true;
5955 break;
5956 }
5957
5958 if (m_recent_rejects_package_reconsiderable.contains(
5959 parent_txid) &&
5960 !m_mempool.exists(parent_txid)) {
5961 // More than 1 parent in
5962 // m_recent_rejects_package_reconsiderable:
5963 // 1p1c will not be sufficient to accept this package,
5964 // so just give up here.
5965 if (rejected_parent_reconsiderable.has_value()) {
5966 fRejectedParents = true;
5967 break;
5968 }
5969 rejected_parent_reconsiderable = parent_txid;
5970 }
5971 }
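// Worked example of the scan above (illustrative): for an orphan with
// parents {P1, P2}:
// - P1 in m_recent_rejects -> fRejectedParents, give up immediately.
// - only P1 in m_recent_rejects_package_reconsiderable -> remember it
//   and keep scanning; a 1-parent-1-child package may still CPFP P1.
// - both P1 and P2 reconsiderable -> give up, since a 1p1c package can
//   carry at most one low-feerate parent.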
5972 if (!fRejectedParents) {
5973 const auto current_time{
5974 GetTime<std::chrono::microseconds>()};
5975
5976 for (const TxId &parent_txid : unique_parents) {
5977 // FIXME: MSG_TX should use a TxHash, not a TxId.
5978 AddKnownTx(*peer, parent_txid);
5979 // Exclude m_recent_rejects_package_reconsiderable: the
5980 // missing parent may have been previously rejected for
5981 // being too low feerate. This orphan might CPFP it.
5982 if (!AlreadyHaveTx(parent_txid,
5983 /*include_reconsiderable=*/false)) {
5984 AddTxAnnouncement(pfrom, parent_txid, current_time);
5985 }
5986 }
5987
5988 // NO_THREAD_SAFETY_ANALYSIS because we can't annotate for
5989 // g_msgproc_mutex
5990 if (unsigned int nEvicted =
5991 m_mempool.withOrphanage(
5992 [&](TxOrphanage &orphanage)
5993 NO_THREAD_SAFETY_ANALYSIS {
5994 if (orphanage.AddTx(ptx,
5995 pfrom.GetId())) {
5996 AddToCompactExtraTransactions(ptx);
5997 }
5998 return orphanage.LimitTxs(
5999 m_opts.max_orphan_txs, m_rng);
6000 }) > 0) {
6001 LogPrint(BCLog::TXPACKAGES,
6002 "orphanage overflow, removed %u tx\n",
6003 nEvicted);
6004 }
6005
6006 // Once added to the orphan pool, a tx is considered
6007 // AlreadyHave, and we shouldn't request it anymore.
6008 m_txrequest.ForgetInvId(tx.GetId());
6009
6010 } else {
6011 LogPrint(BCLog::MEMPOOL,
6012 "not keeping orphan with rejected parents %s\n",
6013 tx.GetId().ToString());
6014 // We will continue to reject this tx since it has rejected
6015 // parents so avoid re-requesting it from other peers.
6016 m_recent_rejects.insert(tx.GetId());
6017 m_txrequest.ForgetInvId(tx.GetId());
6018 }
6019 }
6020 if (state.IsInvalid()) {
6021 ProcessInvalidTx(pfrom.GetId(), ptx, state,
6022 /*maybe_add_extra_compact_tx=*/true);
6023 }
6024 // When a transaction fails for TX_PACKAGE_RECONSIDERABLE, look for
6025 // a matching child in the orphanage, as it is possible that they
6026 // succeed as a package.
6027 if (state.GetResult() ==
6028 TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
6029 LogPrint(
6030 BCLog::TXPACKAGES,
6031 "tx %s failed but reconsiderable, looking for child in "
6032 "orphanage\n",
6033 txid.ToString());
6034 if (auto package_to_validate{
6035 Find1P1CPackage(ptx, pfrom.GetId())}) {
6036 const auto package_result{ProcessNewPackage(
6037 m_chainman.ActiveChainstate(), m_mempool,
6038 package_to_validate->m_txns, /*test_accept=*/false)};
6039 LogPrint(BCLog::TXPACKAGES,
6040 "package evaluation for %s: %s (%s)\n",
6041 package_to_validate->ToString(),
6042 package_result.m_state.IsValid()
6043 ? "package accepted"
6044 : "package rejected",
6045 package_result.m_state.ToString());
6046 ProcessPackageResult(package_to_validate.value(),
6047 package_result);
6048 }
6049 }
6050
6051 if (state.GetResult() ==
6052 TxValidationResult::TX_CONFLICT) {
6053 // Once added to the conflicting pool, a tx is considered
6054 // AlreadyHave, and we shouldn't request it anymore.
6055 m_txrequest.ForgetInvId(tx.GetId());
6056
6057 unsigned int nEvicted{0};
6058 // NO_THREAD_SAFETY_ANALYSIS because of g_msgproc_mutex required
6059 // in the lambda for m_rng
6060 m_mempool.withConflicting(
6061 [&](TxConflicting &conflicting) NO_THREAD_SAFETY_ANALYSIS {
6062 conflicting.AddTx(ptx, pfrom.GetId());
6063 nEvicted = conflicting.LimitTxs(
6064 m_opts.max_conflicting_txs, m_rng);
6065 });
6066
6067 if (nEvicted > 0) {
6068 LogPrint(BCLog::TXPACKAGES,
6069 "conflicting pool overflow, removed %u tx\n",
6070 nEvicted);
6071 }
6072 }
6073 } // Release cs_main
6074
6075 return;
6076 }
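// Recap of the TX handler above (illustrative summary): a valid tx is
// accepted via ProcessValidTx and relayed; TX_MISSING_INPUTS goes to
// the orphanage unless a parent was already rejected; a
// TX_PACKAGE_RECONSIDERABLE failure triggers a 1-parent-1-child package
// retry; a conflicting tx is parked in the conflicting pool pending
// avalanche votes; any other invalid state is handled by
// ProcessInvalidTx.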
6077
6078 if (msg_type == NetMsgType::CMPCTBLOCK) {
6079 // Ignore cmpctblock received while importing
6080 if (m_chainman.m_blockman.LoadingBlocks()) {
6081 LogPrint(BCLog::NET,
6082 "Unexpected cmpctblock message received from peer %d\n",
6083 pfrom.GetId());
6084 return;
6085 }
6086
6087 CBlockHeaderAndShortTxIDs cmpctblock;
6088 try {
6089 vRecv >> cmpctblock;
6090 } catch (std::ios_base::failure &e) {
6091 // This block has non-contiguous or overflowing indexes
6092 Misbehaving(*peer, "cmpctblock-bad-indexes");
6093 return;
6094 }
6095
6096 bool received_new_header = false;
6097 const auto blockhash = cmpctblock.header.GetHash();
6098
6099 {
6100 LOCK(cs_main);
6101
6102 const CBlockIndex *prev_block =
6103 m_chainman.m_blockman.LookupBlockIndex(
6104 cmpctblock.header.hashPrevBlock);
6105 if (!prev_block) {
6106 // Doesn't connect (or is genesis), instead of DoSing in
6107 // AcceptBlockHeader, request deeper headers
6108 if (!m_chainman.IsInitialBlockDownload()) {
6109 MaybeSendGetHeaders(
6110 pfrom, GetLocator(m_chainman.m_best_header), *peer);
6111 }
6112 return;
6113 }
6114 if (prev_block->nChainWork +
6115 CalculateHeadersWork({cmpctblock.header}) <
6116 GetAntiDoSWorkThreshold()) {
6117 // If we get a low-work header in a compact block, we can ignore
6118 // it.
6119 LogPrint(BCLog::NET,
6120 "Ignoring low-work compact block from peer %d\n",
6121 pfrom.GetId());
6122 return;
6123 }
6124
6125 if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
6126 received_new_header = true;
6127 }
6128 }
6129
6130 const CBlockIndex *pindex = nullptr;
6131 BlockValidationState state;
6132 if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header},
6133 /*min_pow_checked=*/true, state,
6134 &pindex)) {
6135 if (state.IsInvalid()) {
6136 MaybePunishNodeForBlock(pfrom.GetId(), state,
6137 /*via_compact_block*/ true,
6138 "invalid header via cmpctblock");
6139 return;
6140 }
6141 }
6142
6143 if (received_new_header) {
6144 LogInfo("Saw new cmpctblock header hash=%s peer=%d\n",
6145 blockhash.ToString(), pfrom.GetId());
6146 }
6147
6148 // When we succeed in decoding a block's txids from a cmpctblock
6149 // message we typically jump to the BLOCKTXN handling code, with a
6150 // dummy (empty) BLOCKTXN message, to re-use the logic there in
6151 // completing processing of the putative block (without cs_main).
6152 bool fProcessBLOCKTXN = false;
6153 CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
6154 
6155 // If we end up treating this as a plain headers message, call that as
6156 // well
6157 // without cs_main.
6158 bool fRevertToHeaderProcessing = false;
6159
6160 // Keep a CBlock for "optimistic" compactblock reconstructions (see
6161 // below)
6162 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6163 bool fBlockReconstructed = false;
6164
6165 {
6166 LOCK(cs_main);
6167 // If AcceptBlockHeader returned true, it set pindex
6168 assert(pindex);
6169 UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
6170
6171 CNodeState *nodestate = State(pfrom.GetId());
6172
6173 // If this was a new header with more work than our tip, update the
6174 // peer's last block announcement time
6175 if (received_new_header &&
6176 pindex->nChainWork >
6177 m_chainman.ActiveChain().Tip()->nChainWork) {
6178 nodestate->m_last_block_announcement = GetTime();
6179 }
6180
6181 if (pindex->nStatus.hasData()) {
6182 // Nothing to do here
6183 return;
6184 }
6185
6186 auto range_flight =
6187 mapBlocksInFlight.equal_range(pindex->GetBlockHash());
6188 size_t already_in_flight =
6189 std::distance(range_flight.first, range_flight.second);
6190 bool requested_block_from_this_peer{false};
6191
6192 // The multimap preserves the ordering of outstanding requests: the
6193 // range is either empty, or this peer's request is first in line.
6194 bool first_in_flight =
6195 already_in_flight == 0 ||
6196 (range_flight.first->second.first == pfrom.GetId());
6197
6198 while (range_flight.first != range_flight.second) {
6199 if (range_flight.first->second.first == pfrom.GetId()) {
6200 requested_block_from_this_peer = true;
6201 break;
6202 }
6203 range_flight.first++;
6204 }
6205
6206 if (pindex->nChainWork <=
6207 m_chainman.ActiveChain()
6208 .Tip()
6209 ->nChainWork || // We know something better
6210 pindex->nTx != 0) {
6211 // We had this block at some point, but pruned it
6212 if (requested_block_from_this_peer) {
6213 // We requested this block for some reason, but our mempool
6214 // will probably be useless so we just grab the block via
6215 // normal getdata.
6216 std::vector<CInv> vInv(1);
6217 vInv[0] = CInv(MSG_BLOCK, blockhash);
6218 m_connman.PushMessage(
6219 &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
6220 }
6221 return;
6222 }
6223
6224 // If we're not close to tip yet, give up and let parallel block
6225 // fetch work its magic.
6226 if (!already_in_flight && !CanDirectFetch()) {
6227 return;
6228 }
6229
6230 // We want to be a bit conservative just to be extra careful about
6231 // DoS possibilities in compact block processing...
6232 if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
6233 if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK &&
6234 nodestate->vBlocksInFlight.size() <
6235 MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
6236 requested_block_from_this_peer) {
6237 std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
6238 if (!BlockRequested(config, pfrom.GetId(), *pindex,
6239 &queuedBlockIt)) {
6240 if (!(*queuedBlockIt)->partialBlock) {
6241 (*queuedBlockIt)
6242 ->partialBlock.reset(
6243 new PartiallyDownloadedBlock(config,
6244 &m_mempool));
6245 } else {
6246 // The block was already in flight using compact
6247 // blocks from the same peer.
6248 LogPrint(BCLog::NET, "Peer sent us compact block "
6249 "we were already syncing!\n");
6250 return;
6251 }
6252 }
6253
6254 PartiallyDownloadedBlock &partialBlock =
6255 *(*queuedBlockIt)->partialBlock;
6256 ReadStatus status =
6257 partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
6258 if (status == READ_STATUS_INVALID) {
6259 // Reset in-flight state in case Misbehaving does not
6260 // result in a disconnect
6261 RemoveBlockRequest(pindex->GetBlockHash(),
6262 pfrom.GetId());
6263 Misbehaving(*peer, "invalid compact block");
6264 return;
6265 } else if (status == READ_STATUS_FAILED) {
6266 if (first_in_flight) {
6267 // Duplicate txindices, the block is now in-flight,
6268 // so just request it.
6269 std::vector<CInv> vInv(1);
6270 vInv[0] = CInv(MSG_BLOCK, blockhash);
6271 m_connman.PushMessage(
6272 &pfrom,
6273 msgMaker.Make(NetMsgType::GETDATA, vInv));
6274 } else {
6275 // Give up for this peer and wait for other peer(s)
6276 RemoveBlockRequest(pindex->GetBlockHash(),
6277 pfrom.GetId());
6278 }
6279 return;
6280 }
6281 
6282 BlockTransactionsRequest req;
6283 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
6284 if (!partialBlock.IsTxAvailable(i)) {
6285 req.indices.push_back(i);
6286 }
6287 }
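// Sketch of the request built above (per BIP 152): GETBLOCKTXN carries
// the block hash plus the indexes of the transactions we failed to
// reconstruct from our mempool. Indexes are differentially encoded on
// the wire, so absolute indexes {2, 5, 6} serialize as {2, 2, 0}.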
6288 if (req.indices.empty()) {
6289 // Dirty hack to jump to BLOCKTXN code (TODO: move
6290 // message handling into their own functions)
6291 BlockTransactions txn;
6292 txn.blockhash = blockhash;
6293 blockTxnMsg << txn;
6294 fProcessBLOCKTXN = true;
6295 } else if (first_in_flight) {
6296 // We will try to round-trip any compact blocks we get
6297 // on failure, as long as it's first...
6298 req.blockhash = pindex->GetBlockHash();
6299 m_connman.PushMessage(
6300 &pfrom,
6301 msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
6302 } else if (pfrom.m_bip152_highbandwidth_to &&
6303 (!pfrom.IsInboundConn() ||
6304 IsBlockRequestedFromOutbound(blockhash) ||
6305 already_in_flight <
6306 MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
6307 // ... or it's a hb relay peer and:
6308 // - peer is outbound, or
6309 // - we already have an outbound attempt in flight (so
6310 // we'll take what we can get), or
6311 // - it's not the final parallel download slot (which we
6312 // may reserve for first outbound)
6313 req.blockhash = pindex->GetBlockHash();
6314 m_connman.PushMessage(
6315 &pfrom,
6316 msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
6317 } else {
6318 // Give up for this peer and wait for other peer(s)
6319 RemoveBlockRequest(pindex->GetBlockHash(),
6320 pfrom.GetId());
6321 }
6322 } else {
6323 // This block is either already in flight from a different
6324 // peer, or this peer has too many blocks outstanding to
6325 // download from. Optimistically try to reconstruct anyway
6326 // since we might be able to without any round trips.
6327 PartiallyDownloadedBlock tempBlock(config, &m_mempool);
6328 ReadStatus status =
6329 tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
6330 if (status != READ_STATUS_OK) {
6331 // TODO: don't ignore failures
6332 return;
6333 }
6334 std::vector<CTransactionRef> dummy;
6335 status = tempBlock.FillBlock(*pblock, dummy);
6336 if (status == READ_STATUS_OK) {
6337 fBlockReconstructed = true;
6338 }
6339 }
6340 } else {
6341 if (requested_block_from_this_peer) {
6342 // We requested this block, but it's far into the future, so
6343 // our mempool will probably be useless - request the block
6344 // normally.
6345 std::vector<CInv> vInv(1);
6346 vInv[0] = CInv(MSG_BLOCK, blockhash);
6347 m_connman.PushMessage(
6348 &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
6349 return;
6350 } else {
6351 // If this was an announce-cmpctblock, we want the same
6352 // treatment as a header message.
6353 fRevertToHeaderProcessing = true;
6354 }
6355 }
6356 } // cs_main
6357
6358 if (fProcessBLOCKTXN) {
6359 return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
6360 blockTxnMsg, time_received, interruptMsgProc);
6361 }
6362
6363 if (fRevertToHeaderProcessing) {
6364 // Headers received from HB compact block peers are permitted to be
6365 // relayed before full validation (see BIP 152), so we don't want to
6366 // disconnect the peer if the header turns out to be for an invalid
6367 // block. Note that if a peer tries to build on an invalid chain,
6368 // that will be detected and the peer will be banned.
6369 return ProcessHeadersMessage(config, pfrom, *peer,
6370 {cmpctblock.header},
6371 /*via_compact_block=*/true);
6372 }
6373
6374 if (fBlockReconstructed) {
6375 // If we got here, we were able to optimistically reconstruct a
6376 // block that is in flight from some other peer.
6377 {
6378 LOCK(cs_main);
6379 mapBlockSource.emplace(pblock->GetHash(),
6380 std::make_pair(pfrom.GetId(), false));
6381 }
6382 // Setting force_processing to true means that we bypass some of
6383 // our anti-DoS protections in AcceptBlock, which filters
6384 // unrequested blocks that might be trying to waste our resources
6385 // (eg disk space). Because we only try to reconstruct blocks when
6386 // we're close to caught up (via the CanDirectFetch() requirement
6387 // above, combined with the behavior of not requesting blocks until
6388 // we have a chain with at least the minimum chain work), and we
6389 // ignore compact blocks with less work than our tip, it is safe to
6390 // treat reconstructed compact blocks as having been requested.
6391 ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
6392 /*min_pow_checked=*/true);
6393 // hold cs_main for CBlockIndex::IsValid()
6394 LOCK(cs_main);
6395 if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
6396 // Clear download state for this block, which is in process from
6397 // some other peer. We do this after calling ProcessNewBlock so
6398 // that a malleated cmpctblock announcement can't be used to
6399 // interfere with block relay.
6400 RemoveBlockRequest(pblock->GetHash(), std::nullopt);
6401 }
6402 }
6403 return;
6404 }
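// Sketch of the high-bandwidth compact block exchange handled above
// (per BIP 152):
//   peer                                    us
//    | -- CMPCTBLOCK (header + shortids) --> |  reconstruct from mempool
//    | <- GETBLOCKTXN {hash, indexes} ------ |  if transactions missing
//    | -- BLOCKTXN {hash, txs} ------------> |  FillBlock, ProcessNewBlock
// When every transaction is already available, the dummy BLOCKTXN path
// (fProcessBLOCKTXN) completes the block without any round trip.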
6405
6406 if (msg_type == NetMsgType::BLOCKTXN) {
6407 // Ignore blocktxn received while importing
6408 if (m_chainman.m_blockman.LoadingBlocks()) {
6409 LogPrint(BCLog::NET,
6410 "Unexpected blocktxn message received from peer %d\n",
6411 pfrom.GetId());
6412 return;
6413 }
6414
6415 BlockTransactions resp;
6416 vRecv >> resp;
6417
6418 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6419 bool fBlockRead = false;
6420 {
6421 LOCK(cs_main);
6422
6423 auto range_flight = mapBlocksInFlight.equal_range(resp.blockhash);
6424 size_t already_in_flight =
6425 std::distance(range_flight.first, range_flight.second);
6426 bool requested_block_from_this_peer{false};
6427
6428 // The multimap preserves the ordering of outstanding requests: the
6429 // range is either empty, or this peer's request is first in line.
6430 bool first_in_flight =
6431 already_in_flight == 0 ||
6432 (range_flight.first->second.first == pfrom.GetId());
6433
6434 while (range_flight.first != range_flight.second) {
6435 auto [node_id, block_it] = range_flight.first->second;
6436 if (node_id == pfrom.GetId() && block_it->partialBlock) {
6437 requested_block_from_this_peer = true;
6438 break;
6439 }
6440 range_flight.first++;
6441 }
6442
6443 if (!requested_block_from_this_peer) {
6444 LogPrint(BCLog::NET,
6445 "Peer %d sent us block transactions for block "
6446 "we weren't expecting\n",
6447 pfrom.GetId());
6448 return;
6449 }
6450
6451 PartiallyDownloadedBlock &partialBlock =
6452 *range_flight.first->second.second->partialBlock;
6453 ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
6454 if (status == READ_STATUS_INVALID) {
6455 // Reset in-flight state in case Misbehaving does not
6456 // result in a disconnect.
6457 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6458 Misbehaving(
6459 *peer,
6460 "invalid compact block/non-matching block transactions");
6461 return;
6462 } else if (status == READ_STATUS_FAILED) {
6463 if (first_in_flight) {
6464 // Might have collided, fall back to getdata now :(
6465 std::vector<CInv> invs;
6466 invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
6467 m_connman.PushMessage(
6468 &pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
6469 } else {
6470 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6471 LogPrint(
6472 BCLog::NET,
6473 "Peer %d sent us a compact block but it failed to "
6474 "reconstruct, waiting on first download to complete\n",
6475 pfrom.GetId());
6476 return;
6477 }
6478 } else {
6479 // Block is either okay, or possibly we received
6480 // READ_STATUS_CHECKBLOCK_FAILED.
6481 // Note that CheckBlock can only fail for one of a few reasons:
6482 // 1. bad-proof-of-work (impossible here, because we've already
6483 // accepted the header)
6484 // 2. merkleroot doesn't match the transactions given (already
6485 // caught in FillBlock with READ_STATUS_FAILED, so
6486 // impossible here)
6487 // 3. the block is otherwise invalid (eg invalid coinbase,
6488 // block is too big, too many sigChecks, etc).
6489 // So if CheckBlock failed, #3 is the only possibility.
6490 // Under BIP 152, we don't DoS-ban unless proof of work is
6491 // invalid (we don't require all the stateless checks to have
6492 // been run). This is handled below, so just treat this as
6493 // though the block was successfully read, and rely on the
6494 // handling in ProcessNewBlock to ensure the block index is
6495 // updated, etc.
6496
6497 // it is now an empty pointer
6498 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6499 fBlockRead = true;
6500 // mapBlockSource is used for potentially punishing peers and
6501 // updating which peers send us compact blocks, so the race
6502 // between here and cs_main in ProcessNewBlock is fine.
6503 // BIP 152 permits peers to relay compact blocks after
6504 // validating the header only; we should not punish peers
6505 // if the block turns out to be invalid.
6506 mapBlockSource.emplace(resp.blockhash,
6507 std::make_pair(pfrom.GetId(), false));
6508 }
6509 } // Don't hold cs_main when we call into ProcessNewBlock
6510 if (fBlockRead) {
6511 // Since we requested this block (it was in mapBlocksInFlight),
6512 // force it to be processed, even if it would not be a candidate for
6513 // new tip (missing previous block, chain not long enough, etc)
6514 // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
6515 // disk-space attacks), but this should be safe due to the
6516 // protections in the compact block handler -- see related comment
6517 // in compact block optimistic reconstruction handling.
6518 ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
6519 /*min_pow_checked=*/true);
6520 }
6521 return;
6522 }
6523
6524 if (msg_type == NetMsgType::HEADERS) {
6525 // Ignore headers received while importing
6526 if (m_chainman.m_blockman.LoadingBlocks()) {
6527 LogPrint(BCLog::NET,
6528 "Unexpected headers message received from peer %d\n",
6529 pfrom.GetId());
6530 return;
6531 }
6532
6533 std::vector<CBlockHeader> headers;
6534
6535 // Bypass the normal CBlock deserialization, as we don't want to risk
6536 // deserializing 2000 full blocks.
6537 unsigned int nCount = ReadCompactSize(vRecv);
6538 if (nCount > MAX_HEADERS_RESULTS) {
6539 Misbehaving(*peer,
6540 strprintf("too-many-headers: headers message size = %u",
6541 nCount));
6542 return;
6543 }
6544 headers.resize(nCount);
6545 for (unsigned int n = 0; n < nCount; n++) {
6546 vRecv >> headers[n];
6547 // Ignore tx count; assume it is 0.
6548 ReadCompactSize(vRecv);
6549 }
6550
6551 ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
6552 /*via_compact_block=*/false);
6553
6554 // Check if the headers presync progress needs to be reported to
6555 // validation. This needs to be done without holding the
6556 // m_headers_presync_mutex lock.
6557 if (m_headers_presync_should_signal.exchange(false)) {
6558 HeadersPresyncStats stats;
6559 {
6560 LOCK(m_headers_presync_mutex);
6561 auto it =
6562 m_headers_presync_stats.find(m_headers_presync_bestpeer);
6563 if (it != m_headers_presync_stats.end()) {
6564 stats = it->second;
6565 }
6566 }
6567 if (stats.second) {
6568 m_chainman.ReportHeadersPresync(
6569 stats.first, stats.second->first, stats.second->second);
6570 }
6571 }
6572
6573 return;
6574 }
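// Note on the manual deserialization above (illustrative): a HEADERS
// payload is a compact-size count followed, for each entry, by an
// 80-byte block header and a compact-size transaction count that is
// always 0 on the wire; reading and discarding that trailing varint is
// what lets the code avoid deserializing 2000 full CBlock objects.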
6575
6576 if (msg_type == NetMsgType::BLOCK) {
6577 // Ignore block received while importing
6578 if (m_chainman.m_blockman.LoadingBlocks()) {
6579 LogPrint(BCLog::NET,
6580 "Unexpected block message received from peer %d\n",
6581 pfrom.GetId());
6582 return;
6583 }
6584
6585 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6586 vRecv >> *pblock;
6587
6588 LogPrint(BCLog::NET, "received block %s peer=%d\n",
6589 pblock->GetHash().ToString(), pfrom.GetId());
6590
6591 const CBlockIndex *prev_block{
6592 WITH_LOCK(m_chainman.GetMutex(),
6593 return m_chainman.m_blockman.LookupBlockIndex(
6594 pblock->hashPrevBlock))};
6595
6596 if (IsBlockMutated(/*block=*/*pblock)) {
6597 LogPrint(BCLog::NET,
6598 "Received mutated block from peer=%d\n", peer->m_id);
6599 Misbehaving(*peer, "mutated block");
6600 WITH_LOCK(cs_main,
6601 RemoveBlockRequest(pblock->GetHash(), peer->m_id));
6602 return;
6603 }
6604
6605 // Process all blocks from whitelisted peers, even if not requested,
6606 // unless we're still syncing with the network. Such an unrequested
6607 // block may still be processed, subject to the conditions in
6608 // AcceptBlock().
6609 bool forceProcessing = pfrom.HasPermission(NetPermissionFlags::NoBan) &&
6610 !m_chainman.IsInitialBlockDownload();
6611 const BlockHash hash = pblock->GetHash();
6612 bool min_pow_checked = false;
6613 {
6614 LOCK(cs_main);
6615 // Always process the block if we requested it, since we may
6616 // need it even when it's not a candidate for a new best tip.
6617 forceProcessing = IsBlockRequested(hash);
6618 RemoveBlockRequest(hash, pfrom.GetId());
6619 // mapBlockSource is only used for punishing peers and setting
6620 // which peers send us compact blocks, so the race between here and
6621 // cs_main in ProcessNewBlock is fine.
6622 mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
6623
6624 // Check work on this block against our anti-dos thresholds.
6625 if (prev_block &&
6626 prev_block->nChainWork +
6627 CalculateHeadersWork({pblock->GetBlockHeader()}) >=
6628 GetAntiDoSWorkThreshold()) {
6629 min_pow_checked = true;
6630 }
6631 }
6632 ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
6633 return;
6634 }
6635
6636 if (msg_type == NetMsgType::AVAHELLO) {
6637 if (!m_avalanche) {
6638 return;
6639 }
6640 {
6641 LOCK(pfrom.cs_avalanche_pubkey);
6642 if (pfrom.m_avalanche_pubkey.has_value()) {
6643 LogPrint(
6644 BCLog::AVALANCHE,
6645 "Ignoring avahello from peer %d: already in our node set\n",
6646 pfrom.GetId());
6647 return;
6648 }
6649
6650 avalanche::Delegation delegation;
6651 vRecv >> delegation;
6652
6653 // A delegation with an all zero limited id indicates that the peer
6654 // has no proof, so we're done.
6655 if (delegation.getLimitedProofId() != uint256::ZERO) {
6656 avalanche::DelegationState state;
6657 CPubKey pubkey;
6658 if (!delegation.verify(state, pubkey)) {
6659 Misbehaving(*peer, "invalid-delegation");
6660 return;
6661 }
6662 pfrom.m_avalanche_pubkey = std::move(pubkey);
6663
6664 HashWriter sighasher{};
6665 sighasher << delegation.getId();
6666 sighasher << pfrom.nRemoteHostNonce;
6667 sighasher << pfrom.GetLocalNonce();
6668 sighasher << pfrom.nRemoteExtraEntropy;
6669 sighasher << pfrom.GetLocalExtraEntropy();
6670 
6671 SchnorrSig sig;
6672 vRecv >> sig;
6673 if (!(*pfrom.m_avalanche_pubkey)
6674 .VerifySchnorr(sighasher.GetHash(), sig)) {
6675 Misbehaving(*peer, "invalid-avahello-signature");
6676 return;
6677 }
6678
6679 // If we don't know this proof already, add it to the tracker so
6680 // it can be requested.
6681 const avalanche::ProofId proofid(delegation.getProofId());
6682 if (!AlreadyHaveProof(proofid)) {
6683 const bool preferred = isPreferredDownloadPeer(pfrom);
6684 LOCK(cs_proofrequest);
6685 AddProofAnnouncement(pfrom, proofid,
6686 GetTime<std::chrono::microseconds>(),
6687 preferred);
6688 }
6689
6690 // Don't check the return value. If it fails we probably don't
6691 // know about the proof yet.
6692 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
6693 return pm.addNode(pfrom.GetId(), proofid);
6694 });
6695 }
6696
6697 pfrom.m_avalanche_enabled = true;
6698 }
6699
6700 // Send getavaaddr and getavaproofs to our avalanche outbound or
6701 // manual connections
6702 if (!pfrom.IsInboundConn()) {
6703 m_connman.PushMessage(&pfrom,
6704 msgMaker.Make(NetMsgType::GETAVAADDR));
6705 WITH_LOCK(peer->m_addr_token_bucket_mutex,
6706 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
6707
6708 if (peer->m_proof_relay && !m_chainman.IsInitialBlockDownload()) {
6709 m_connman.PushMessage(&pfrom,
6710 msgMaker.Make(NetMsgType::GETAVAPROOFS));
6711 peer->m_proof_relay->compactproofs_requested = true;
6712 }
6713 }
6714
6715 return;
6716 }
6717
6718 if (msg_type == NetMsgType::AVAPOLL) {
6719 if (!m_avalanche) {
6720 return;
6721 }
6722 const auto now = Now<SteadyMilliseconds>();
6723
6724 const auto last_poll = pfrom.m_last_poll;
6725 pfrom.m_last_poll = now;
6726
6727 if (now <
6728 last_poll + std::chrono::milliseconds(m_opts.avalanche_cooldown)) {
6729 LogPrint(BCLog::AVALANCHE,
6730 "Ignoring repeated avapoll from peer %d: cooldown not "
6731 "elapsed\n",
6732 pfrom.GetId());
6733 return;
6734 }
6735
6736 const bool quorum_established = m_avalanche->isQuorumEstablished();
6737
6738 uint64_t round;
6739 Unserialize(vRecv, round);
6740
6741 unsigned int nCount = ReadCompactSize(vRecv);
6742 if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
6743 Misbehaving(
6744 *peer,
6745 strprintf("too-many-ava-poll: poll message size = %u", nCount));
6746 return;
6747 }
6748
6749 std::vector<avalanche::Vote> votes;
6750 votes.reserve(nCount);
6751
6752 bool fPreconsensus{false};
6753 bool fStakingPreconsensus{false};
6754 {
6755 LOCK(::cs_main);
6756 const CBlockIndex *tip = m_chainman.ActiveTip();
6757 fPreconsensus = m_avalanche->isPreconsensusActivated(tip);
6758 fStakingPreconsensus =
6759 m_avalanche->isStakingPreconsensusActivated(tip);
6760 }
6761
6762 for (unsigned int n = 0; n < nCount; n++) {
6763 CInv inv;
6764 vRecv >> inv;
6765
6766 // Default vote for unknown inv type
6767 uint32_t vote = -1;
6768
6769 // We don't vote definitively until we have an established quorum
6770 if (!quorum_established) {
6771 votes.emplace_back(vote, inv.hash);
6772 continue;
6773 }
6774
6775 // If inv's type is known, get a vote for its hash
6776 switch (inv.type) {
6777 case MSG_TX: {
6778 if (fPreconsensus) {
6779 vote =
6780 GetAvalancheVoteForTx(*m_avalanche, TxId(inv.hash));
6781 }
6782 } break;
6783 case MSG_BLOCK: {
6784 vote = WITH_LOCK(cs_main, return GetAvalancheVoteForBlock(
6785 BlockHash(inv.hash)));
6786 } break;
6787 case MSG_AVA_PROOF: {
6788 vote = GetAvalancheVoteForProof(
6789 *m_avalanche, avalanche::ProofId(inv.hash));
6790 } break;
6791 case MSG_AVA_STAKE_CONTENDER: {
6792 if (fStakingPreconsensus) {
6793 vote = m_avalanche->getStakeContenderStatus(
6794 avalanche::StakeContenderId(inv.hash));
6795 }
6796 } break;
6797 default: {
6798 LogPrint(BCLog::AVALANCHE,
6799 "poll inv type %d unknown from peer=%d\n",
6800 inv.type, pfrom.GetId());
6801 }
6802 }
6803
6804 votes.emplace_back(vote, inv.hash);
6805 }
6806
6807 // Send the query to the node.
6808 m_avalanche->sendResponse(
6809 &pfrom, avalanche::Response(round, m_opts.avalanche_cooldown,
6810 std::move(votes)));
6811 return;
6812 }
6813
6814 if (msg_type == NetMsgType::AVARESPONSE) {
6815 if (!m_avalanche) {
6816 return;
6817 }
6818 // As long as QUIC is not implemented, we need to sign responses and
6819 // verify their signatures in order to avoid any manipulation of
6820 // messages at the transport level.
6821 CHashVerifier<CDataStream> verifier(&vRecv);
6822 avalanche::Response response;
6823 verifier >> response;
6824 
6825 SchnorrSig sig;
6826 vRecv >> sig;
6827
6828 {
6829 LOCK(pfrom.cs_avalanche_pubkey);
6830 if (!pfrom.m_avalanche_pubkey.has_value() ||
6831 !(*pfrom.m_avalanche_pubkey)
6832 .VerifySchnorr(verifier.GetHash(), sig)) {
6833 Misbehaving(*peer, "invalid-ava-response-signature");
6834 return;
6835 }
6836 }
6837
6838 auto now = GetTime<std::chrono::seconds>();
6839
6840 std::vector<avalanche::VoteItemUpdate> updates;
6841 bool disconnect{false};
6842 std::string error;
6843 if (!m_avalanche->registerVotes(pfrom.GetId(), response, updates,
6844 disconnect, error)) {
6845 if (disconnect) {
6846 Misbehaving(*peer, error);
6847 return;
6848 }
6849
6850 // Otherwise the node may have had a network issue. Increase the
6851 // fault counter instead and only ban if we reached a threshold.
6852 // This allows for fault tolerance should there be a temporary
6853 // outage while still preventing DoS'ing behaviors, as the counter
6854 // is reset if no fault occurred over some time period.
6855 pfrom.m_avalanche_message_fault_counter++;
6856 pfrom.m_avalanche_last_message_fault = now;
6857 
6858 // Allow up to 12 messages before increasing the ban score. Since
6859 // the queries are cleared after 10s, this is at least 2 minutes
6860 // of network outage tolerance over the 1h window.
6861 if (pfrom.m_avalanche_message_fault_counter > 12) {
6862 LogPrint(
6863 BCLog::AVALANCHE,
6864 "Repeated failure to register votes from peer %d: %s\n",
6865 pfrom.GetId(), error);
6866 pfrom.m_avalanche_message_fault_score++;
6867 if (pfrom.m_avalanche_message_fault_score > 100) {
6868 Misbehaving(*peer, error);
6869 }
6870 return;
6871 }
6872 }
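// Worked numbers for the tolerance above (illustrative): with queries
// cleared after 10s, 12 consecutive failures amount to roughly
// 12 x 10s = 120s, i.e. the "at least 2 minutes of network outage
// tolerance over the 1h window" before the fault score starts growing.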
6873
6874 // If no fault occurred within the last hour, reset the fault counter
6875 if (now > (pfrom.m_avalanche_last_message_fault.load() + 1h)) {
6876 pfrom.m_avalanche_message_fault_counter = 0;
6877 }
6878
6879 pfrom.invsVoted(response.GetVotes().size());
6880
6881 auto logVoteUpdate = [](const auto &voteUpdate,
6882 const std::string &voteItemTypeStr,
6883 const auto &voteItemId) {
6884 std::string voteOutcome;
6885 bool alwaysPrint = false;
6886 switch (voteUpdate.getStatus()) {
6887 case avalanche::VoteStatus::Invalid:
6888 voteOutcome = "invalidated";
6889 alwaysPrint = true;
6890 break;
6891 case avalanche::VoteStatus::Rejected:
6892 voteOutcome = "rejected";
6893 break;
6894 case avalanche::VoteStatus::Accepted:
6895 voteOutcome = "accepted";
6896 break;
6897 case avalanche::VoteStatus::Finalized:
6898 voteOutcome = "finalized";
6899 // Don't log tx finalization unconditionally as it can be
6900 // quite spammy.
6901 alwaysPrint = voteItemTypeStr != "tx";
6902 break;
6903 case avalanche::VoteStatus::Stalled:
6904 voteOutcome = "stalled";
6905 alwaysPrint = true;
6906 break;
6907
6908 // No default case, so the compiler can warn about missing
6909 // cases
6910 }
6911
6912 // Stake contenders are only ever logged to the avalanche category
6913 alwaysPrint &= (voteItemTypeStr != "contender");
6914
6915 if (alwaysPrint) {
6916 LogPrintf("Avalanche %s %s %s\n", voteOutcome, voteItemTypeStr,
6917 voteItemId.ToString());
6918 } else {
6919 // Only print these messages if -debug=avalanche is set
6920 LogPrint(BCLog::AVALANCHE, "Avalanche %s %s %s\n", voteOutcome,
6921 voteItemTypeStr, voteItemId.ToString());
6922 }
6923 };
6924
6925 bool shouldActivateBestChain = false;
6926
6927 bool fPreconsensus{false};
6928 bool fStakingPreconsensus{false};
6929 {
6930 LOCK(::cs_main);
6931 const CBlockIndex *tip = m_chainman.ActiveTip();
6932 fPreconsensus = m_avalanche->isPreconsensusActivated(tip);
6933 fStakingPreconsensus =
6934 m_avalanche->isStakingPreconsensusActivated(tip);
6935 }
6936
6937 for (const auto &u : updates) {
6938 const avalanche::AnyVoteItem &item = u.getVoteItem();
6939
6940 // Don't use a visitor here as we want to ignore unsupported item
6941 // types. This comes in handy when adding new types.
6942 if (auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
6943 avalanche::ProofRef proof = *pitem;
6944 const avalanche::ProofId &proofid = proof->getId();
6945
6946 logVoteUpdate(u, "proof", proofid);
6947
6948 auto rejectionMode =
6949 avalanche::PeerManager::RejectionMode::DEFAULT;
6950 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
6951 switch (u.getStatus()) {
6952 case avalanche::VoteStatus::Invalid:
6953 m_avalanche->withPeerManager(
6954 [&](avalanche::PeerManager &pm) {
6955 pm.setInvalid(proofid);
6956 });
6957 // Fallthrough
6958 case avalanche::VoteStatus::Stalled:
6959 // Invalidate mode removes the proof from all proof
6960 // pools
6961 rejectionMode =
6962 avalanche::PeerManager::RejectionMode::INVALIDATE;
6963 // Fallthrough
6964 case avalanche::VoteStatus::Rejected:
6965 if (!m_avalanche->withPeerManager(
6966 [&](avalanche::PeerManager &pm) {
6967 return pm.rejectProof(proofid,
6968 rejectionMode);
6969 })) {
6970 LogPrint(BCLog::AVALANCHE,
6971 "ERROR: Failed to reject proof: %s\n",
6972 proofid.GetHex());
6973 }
6974 break;
6975 case avalanche::VoteStatus::Finalized:
6976 m_avalanche->setRecentlyFinalized(proofid);
6977 nextCooldownTimePoint += std::chrono::seconds(
6978 m_opts.avalanche_peer_replacement_cooldown);
6979 case avalanche::VoteStatus::Accepted:
6980 if (!m_avalanche->withPeerManager(
6981 [&](avalanche::PeerManager &pm) {
6982 pm.registerProof(
6983 proof,
6984 avalanche::PeerManager::
6985 RegistrationMode::FORCE_ACCEPT);
6986 return pm.forPeer(
6987 proofid,
6988 [&](const avalanche::Peer &peer) {
6989 pm.updateNextPossibleConflictTime(
6990 peer.peerid,
6991 nextCooldownTimePoint);
6992 if (u.getStatus() ==
6993 avalanche::VoteStatus::
6994 Finalized) {
6995 pm.setFinalized(peer.peerid);
6996 }
6997 // Only fail if the peer was not
6998 // created
6999 return true;
7000 });
7001 })) {
7002 LogPrint(BCLog::AVALANCHE,
7003 "ERROR: Failed to accept proof: %s\n",
7004 proofid.GetHex());
7005 }
7006 break;
7007 }
7008 }
7009
7010 auto getBlockFromIndex = [this](const CBlockIndex *pindex) {
7011 // First check if the block is cached before reading
7012 // from disk.
7013 std::shared_ptr<const CBlock> pblock = WITH_LOCK(
7014 m_most_recent_block_mutex, return m_most_recent_block);
7015
7016 if (!pblock || pblock->GetHash() != pindex->GetBlockHash()) {
7017 std::shared_ptr<CBlock> pblockRead =
7018 std::make_shared<CBlock>();
7019 if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead,
7020 *pindex)) {
7021 assert(!"cannot load block from disk");
7022 }
7023 pblock = pblockRead;
7024 }
7025 return pblock;
7026 };
7027
7028 if (auto pitem = std::get_if<const CBlockIndex *>(&item)) {
7029 CBlockIndex *pindex = const_cast<CBlockIndex *>(*pitem);
7030
7031 shouldActivateBestChain = true;
7032
7033 logVoteUpdate(u, "block", pindex->GetBlockHash());
7034
7035 switch (u.getStatus()) {
7036 case avalanche::VoteStatus::Rejected: {
7037 BlockValidationState state;
7038 m_chainman.ActiveChainstate().ParkBlock(state, pindex);
7039 if (!state.IsValid()) {
7040 LogPrintf("ERROR: Database error: %s\n",
7041 state.GetRejectReason());
7042 return;
7043 }
7044 } break;
7045 case avalanche::VoteStatus::Invalid: {
7046 BlockValidationState state;
7047 m_chainman.ActiveChainstate().ParkBlock(state, pindex);
7048 if (!state.IsValid()) {
7049 LogPrintf("ERROR: Database error: %s\n",
7050 state.GetRejectReason());
7051 return;
7052 }
7053
7054 auto pblock = getBlockFromIndex(pindex);
7055 assert(pblock);
7056
7057 WITH_LOCK(cs_main, GetMainSignals().BlockInvalidated(
7058 pindex, pblock));
7059 } break;
7060 case avalanche::VoteStatus::Accepted: {
7061 LOCK(cs_main);
7062 m_chainman.ActiveChainstate().UnparkBlock(pindex);
7063 } break;
7064 case avalanche::VoteStatus::Finalized: {
7065 m_avalanche->setRecentlyFinalized(
7066 pindex->GetBlockHash());
7067
7068 m_avalanche->cleanupStakingRewards(pindex->nHeight);
7069
7070 std::unique_ptr<node::CBlockTemplate> blockTemplate;
7071 {
7072 LOCK(cs_main);
7073 auto &chainstate = m_chainman.ActiveChainstate();
7074 chainstate.UnparkBlock(pindex);
7075
7076 const bool newlyFinalized =
7077 !chainstate.IsBlockAvalancheFinalized(pindex) &&
7078 chainstate.AvalancheFinalizeBlock(pindex,
7079 *m_avalanche);
7080
7081 // Skip if the block is already finalized, aka an
7082 // ancestor of the finalized tip.
7083 if (fPreconsensus && newlyFinalized) {
7084 auto pblock = getBlockFromIndex(pindex);
7085 assert(pblock);
7086
7087 {
7088 // If the finalized block is not the tip, we
7089 // need to keep track of the transactions
7090 // from the non final blocks, so that we can
7091 // check if they were finalized by
7092 // pre-consensus. If these transactions were
7093 // pruned from the radix tree, their
7094 // finalization status could be lost in the
7095 // case the non final blocks are later
7096 // rejected.
7097 CBlockIndex *tip = m_chainman.ActiveTip();
7098 std::unordered_set<TxId, SaltedTxIdHasher>
7099 confirmedTxIdsInNonFinalizedBlocks;
7100 for (const CBlockIndex *block = tip;
7101 block != nullptr && block != pindex;
7102 block = block->pprev) {
7103 auto currentBlock =
7104 getBlockFromIndex(block);
7105 assert(currentBlock);
7106 for (const auto &tx :
7107 currentBlock->vtx) {
7108 confirmedTxIdsInNonFinalizedBlocks
7109 .insert(tx->GetId());
7110 }
7111 }
7112
7113 // Remove the transactions that are not
7114 // confirmed
7115 LOCK(m_mempool.cs);
7116 m_mempool.removeForFinalizedBlock(
7117 confirmedTxIdsInNonFinalizedBlocks);
7118
7119 // Now add mempool transactions to the poll.
7120 // To determine which transaction to add, we
7121 // leverage the legacy block template
7122 // construction method and build a template
7123 // with the most valuable txs in it. These
7124 // transactions are sorted topologically;
7125 // parents come before children, so we can
7126 // poll for children first and optimize the
7127 // number of polls.
7128 node::BlockAssembler blockAssembler(
7129 config, chainstate, &m_mempool,
7130 m_avalanche);
7131 blockAssembler.pblocktemplate.reset(
7132 new node::CBlockTemplate());
7133
7134 if (blockAssembler.pblocktemplate) {
7135 blockAssembler.addTxs(m_mempool);
7136 blockTemplate = std::move(
7137 blockAssembler.pblocktemplate);
7138 }
7139 }
7140 }
7141 } // release cs_main
7142
7143 if (blockTemplate) {
7144 // We could check if the tx is final already
7145 // but addToReconcile will skip the recently
7146 // finalized txs, so let's abuse this
7147 // feature and avoid a tree lookup for each
7148 // tx as an optimization.
7149 for (const auto &templateEntry :
7150 reverse_iterate(blockTemplate->entries)) {
7151 m_avalanche->addToReconcile(templateEntry.tx);
7152 }
7153 }
7154 } break;
7155 case avalanche::VoteStatus::Stalled:
7156 // Fall back on Nakamoto consensus in the absence of
7157 // Avalanche votes for other competing or descendant
7158 // blocks.
7159 break;
7160 }
7161 }
7162
7163 if (fStakingPreconsensus) {
7164 if (auto pitem =
7165 std::get_if<const avalanche::StakeContenderId>(&item)) {
7166 const avalanche::StakeContenderId contenderId = *pitem;
7167 logVoteUpdate(u, "contender", contenderId);
7168
7169 switch (u.getStatus()) {
7170 case avalanche::VoteStatus::Invalid: // Fallthrough
7171 case avalanche::VoteStatus::Rejected: {
7172 m_avalanche->rejectStakeContender(contenderId);
7173 break;
7174 }
7175 case avalanche::VoteStatus::Finalized: {
7176 m_avalanche->setRecentlyFinalized(contenderId);
7177 m_avalanche->finalizeStakeContender(contenderId);
7178 break;
7179 }
7180 case avalanche::VoteStatus::Accepted: {
7181 m_avalanche->acceptStakeContender(contenderId);
7182 break;
7183 }
7184 case avalanche::VoteStatus::Stalled:
7185 break;
7186 }
7187 }
7188 }
7189
7190 if (!fPreconsensus) {
7191 continue;
7192 }
7193
7194 if (auto pitem = std::get_if<const CTransactionRef>(&item)) {
7195 const CTransactionRef tx = *pitem;
7196 assert(tx != nullptr);
7197
7198 const TxId &txid = tx->GetId();
7199 const auto status{u.getStatus()};
7200
7201 if (status != avalanche::VoteStatus::Finalized) {
7202 // Because we also want to log the parent txs of this
7203 // finalized tx, we log the finalization later.
7204 logVoteUpdate(u, "tx", txid);
7205 }
7206
7207 switch (status) {
7208 case avalanche::VoteStatus::Invalid: // Fallthrough
7209 case avalanche::VoteStatus::Rejected: {
7210 // Remove from the mempool and the finalized tree, as
7211 // well as all the children txs. Note that removal from
7212 // the finalized tree is only a safety net and should
7213 // never happen.
7214 LOCK2(cs_main, m_mempool.cs);
7215 if (m_mempool.exists(txid)) {
7216 m_mempool.removeRecursive(
7217 *tx, MemPoolRemovalReason::AVALANCHE);
7218 
7219 std::vector<CTransactionRef> conflictingTxs =
7220 m_mempool.withConflicting(
7221 [&tx](const TxConflicting &conflicting) {
7222 return conflicting.GetConflictTxs(tx);
7223 });
7224
7225 if (conflictingTxs.size() > 0) {
7226 // Pull the first tx only, erase the others so
7227 // they can be re-downloaded if needed.
7228 auto result = m_chainman.ProcessTransaction(
7229 conflictingTxs[0]);
7230 assert(result.m_state.IsValid());
7231 }
7232
7233 m_mempool.withConflicting(
7234 [&conflictingTxs,
7235 &tx](TxConflicting &conflicting) {
7236 for (const auto &conflictingTx :
7237 conflictingTxs) {
7238 conflicting.EraseTx(
7239 conflictingTx->GetId());
7240 }
7241
7242 // Note that we don't store the descendants,
7243 // which should be re-downloaded. This could
7244 // be optimized but we will have to manage
7245 // the topological ordering.
7246 conflicting.AddTx(tx, NO_NODE);
7247 });
7248 }
7249
7250 if (status == avalanche::VoteStatus::Invalid) {
7251 // Also remove from the conflicting pool. If it was
7252 // in the mempool (unlikely) we just moved it there.
7253 m_mempool.withConflicting(
7254 [&txid](TxConflicting &conflicting) {
7255 conflicting.EraseTx(txid);
7256 });
7257
7258 m_recent_rejects.insert(txid);
7259
7260 CCoinsViewMemPool coinViewMempool(
7261 &m_chainman.ActiveChainstate().CoinsTip(),
7262 m_mempool);
7263 CCoinsViewCache coinViewCache(&coinViewMempool);
7264 auto spentCoins =
7265 std::make_shared<const std::vector<Coin>>(
7266 GetSpentCoins(tx, coinViewCache));
7267
7269 spentCoins);
7270 }
7271
7272 break;
7273 }
7274 case avalanche::VoteStatus::Accepted:
7275 // fallthrough
7276 case avalanche::VoteStatus::Finalized: {
7277 {
7278 LOCK2(cs_main, m_mempool.cs);
7279 if (m_mempool.withConflicting(
7280 [&txid](const TxConflicting &conflicting) {
7281 return conflicting.HaveTx(txid);
7282 })) {
7283 // Swap conflicting txs from/to the mempool
7284 std::vector<CTransactionRef>
7285 mempool_conflicting_txs;
7286 for (const auto &txin : tx->vin) {
7287 // Find the conflicting txs
7288 if (CTransactionRef conflict =
7289 m_mempool.GetConflictTx(
7290 txin.prevout)) {
7291 mempool_conflicting_txs.push_back(
7292 std::move(conflict));
7293 }
7294 }
7295 m_mempool.removeConflicts(*tx);
7296
7297 auto result = m_chainman.ProcessTransaction(tx);
7298 assert(result.m_state.IsValid());
7299
7300 m_mempool.withConflicting(
7301 [&txid, &mempool_conflicting_txs](
7302 TxConflicting &conflicting) {
7303 conflicting.EraseTx(txid);
7304 // Store the first tx only, the others
7305 // can be re-downloaded if needed.
7306 if (mempool_conflicting_txs.size() >
7307 0) {
7308 conflicting.AddTx(
7309 mempool_conflicting_txs[0],
7310 NO_NODE);
7311 }
7312 });
7313 }
7314 }
7315
7316 if (status == avalanche::VoteStatus::Finalized) {
7317 LOCK2(cs_main, m_mempool.cs);
7318 auto it = m_mempool.GetIter(txid);
7319 if (!it.has_value()) {
7320 LogPrint(
7321 BCLog::AVALANCHE,
7322 "Error: finalized tx (%s) is not in the "
7323 "mempool\n",
7324 txid.ToString());
7325 break;
7326 }
7327
7328 std::vector<TxId> finalizedTxIds;
7329 m_mempool.setAvalancheFinalized(
7330 **it, m_chainparams.GetConsensus(),
7331 *Assert(m_chainman.ActiveTip()),
7332 finalizedTxIds);
7333
7334 for (const auto &finalized_txid : finalizedTxIds) {
7335 m_avalanche->setRecentlyFinalized(
7336 finalized_txid);
7337 // Log the parent tx being implicitly finalized
7338 // as well
7339 logVoteUpdate(u, "tx", finalized_txid);
7340 }
7341
7342 // NO_THREAD_SAFETY_ANALYSIS because
7343 // m_recent_rejects requires cs_main in the lambda
7344 m_mempool.withConflicting(
7345 [&](TxConflicting &conflicting)
7346 NO_THREAD_SAFETY_ANALYSIS {
7347 std::vector<CTransactionRef>
7348 conflictingTxs =
7349 conflicting.GetConflictTxs(tx);
7350 for (const auto &conflictingTx :
7351 conflictingTxs) {
7352 m_recent_rejects.insert(
7353 conflictingTx->GetId());
7354 conflicting.EraseTx(
7355 conflictingTx->GetId());
7356 }
7357 });
7358 }
7359
7360 break;
7361 }
7362 case avalanche::VoteStatus::Stalled: {
7363 LOCK(cs_main);
7364
7365 // If the tx is stale, there is no point keeping it
7366 // around as it will not be mined. Let's remove it but
7367 // also forget we got it so it can be eventually
7368 // re-downloaded.
7369 {
7370 LOCK(m_mempool.cs);
7371 m_mempool.removeRecursive(
7372 *tx, MemPoolRemovalReason::AVALANCHE);
7373 
7374 m_mempool.withConflicting(
7375 [&txid](TxConflicting &conflicting) {
7376 conflicting.EraseTx(txid);
7377 });
7378 }
7379
7380 // Make sure we can request this tx again
7381 m_txrequest.ForgetInvId(txid);
7382
7383 {
7384 // Save the stalled txids so that we can relay them
7385 // to our peers.
7386 LOCK(m_peer_mutex);
7387 for (auto &it : m_peer_map) {
7388 auto tx_relay = (*it.second).GetTxRelay();
7389 if (!tx_relay) {
7390 continue;
7391 }
7392
7393 LOCK(tx_relay->m_tx_inventory_mutex);
7394
7395 // We limit the size of the stalled txs set to
7396 // avoid unbounded memory growth. In practice,
7397 // this should not be an issue as stalled txs
7398 // should be few and far between. If we are at
7399 // the limit, remove the oldest entries.
7400 auto &stalled_by_time =
7401 tx_relay->m_avalanche_stalled_txids
7402 .get<by_time>();
7403 if (stalled_by_time.size() >=
7404 MAX_AVALANCHE_STALLED_TXIDS_PER_PEER) {
7405 stalled_by_time.erase(
7406 stalled_by_time.begin()->timeAdded);
7407 }
7408
7409 tx_relay->m_avalanche_stalled_txids.insert(
7410 {txid, now});
7411 }
7412 }
7413
7414 break;
7415 }
7416 }
7417 }
7418 }
7419
7420 if (shouldActivateBestChain) {
7421 BlockValidationState state;
7422 if (!m_chainman.ActiveChainstate().ActivateBestChain(
7423 state, /*pblock=*/nullptr, m_avalanche)) {
7424 LogPrintf("failed to activate chain (%s)\n", state.ToString());
7425 }
7426 }
7427
7428 return;
7429 }
7430
7431 if (msg_type == NetMsgType::AVAPROOF) {
7432 if (!m_avalanche) {
7433 return;
7434 }
7435 auto proof = RCUPtr<avalanche::Proof>::make();
7436 vRecv >> *proof;
7437
7438 ReceivedAvalancheProof(pfrom, *peer, proof);
7439
7440 return;
7441 }
7442
7443 if (msg_type == NetMsgType::GETAVAPROOFS) {
7444 if (!m_avalanche) {
7445 return;
7446 }
7447 if (peer->m_proof_relay == nullptr) {
7448 return;
7449 }
7450
7451 peer->m_proof_relay->lastSharedProofsUpdate =
7452 GetTime<std::chrono::seconds>();
7453
7454 peer->m_proof_relay->sharedProofs =
7455 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
7456 return pm.getShareableProofsSnapshot();
7457 });
7458
7459 avalanche::CompactProofs compactProofs(
7460 peer->m_proof_relay->sharedProofs);
7461 m_connman.PushMessage(
7462 &pfrom, msgMaker.Make(NetMsgType::AVAPROOFS, compactProofs));
7463
7464 return;
7465 }
7466
7467 if (msg_type == NetMsgType::AVAPROOFS) {
7468 if (!m_avalanche) {
7469 return;
7470 }
7471 if (peer->m_proof_relay == nullptr) {
7472 return;
7473 }
7474
7475 // Only process the compact proofs if we requested them
7476 if (!peer->m_proof_relay->compactproofs_requested) {
7477 LogPrint(BCLog::AVALANCHE, "Ignoring unsolicited avaproofs\n");
7478 return;
7479 }
7480 peer->m_proof_relay->compactproofs_requested = false;
7481
7482 avalanche::CompactProofs compactProofs;
7483 try {
7484 vRecv >> compactProofs;
7485 } catch (std::ios_base::failure &e) {
7486 // These compact proofs have non-contiguous or overflowing indexes
7487 Misbehaving(*peer, "avaproofs-bad-indexes");
7488 return;
7489 }
7490
7491 // If there are prefilled proofs, process them first
7492 for (const auto &prefilledProof : compactProofs.getPrefilledProofs()) {
7493 if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
7494 // If we got an invalid proof, the peer is getting banned and we
7495 // can bail out.
7496 return;
7497 }
7498 }
7499
7500 // If there is no shortid, avoid parsing/responding/accounting for the
7501 // message.
7502 if (compactProofs.getShortIDs().size() == 0) {
7503 return;
7504 }
7505
7506 // To determine the chance that the number of entries in a bucket
7507 // exceeds N, we use the fact that the number of elements in a single
7508 // bucket is binomially distributed (with n = the number of shorttxids
7509 // S, and p = 1 / the number of buckets), that in the worst case the
7510 // number of buckets is equal to S (due to std::unordered_map having a
7511 // default load factor of 1.0), and that the chance for any bucket to
7512 // exceed N elements is at most buckets * (the chance that any given
7513 // bucket is above N elements). Thus:
7514 // P(max_elements_per_bucket > N) <=
7515 // S * (1 - cdf(binomial(n=S,p=1/S), N))
7516 // If we assume up to 21000000, allowing 15 elements per bucket should
7517 // only fail once per ~2.5 million avaproofs transfers (per peer and
7518 // connection).
7519 // TODO re-evaluate the bucket count to a more realistic value.
7520 // TODO: In the case of a shortid-collision, we should request all the
7521 // proofs which collided. For now, we only request one, which is not
7522 // that bad considering this event is expected to be very rare.
7523 auto shortIdProcessor =
7524 avalanche::ProofShortIdProcessor(compactProofs.getPrefilledProofs(),
7525 compactProofs.getShortIDs(), 15);
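// Worked instance of the bound above (illustrative): with S shortids
// hashed into S buckets, a bucket's load is ~Binomial(n=S, p=1/S), so
//   P(max bucket > 15) <= S * (1 - BinomCDF(15; S, 1/S))
// which, for S up to 21000000, fails about once per ~2.5 million
// avaproofs transfers as stated.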
7526
7527 if (shortIdProcessor.hasOutOfBoundIndex()) {
7528 // This should be caught by deserialization, but check it here as
7529 // well for good measure.
7530 Misbehaving(*peer, "avaproofs-bad-indexes");
7531 return;
7532 }
7533 if (!shortIdProcessor.isEvenlyDistributed()) {
7534 // This is suspicious, don't ban but bail out
7535 return;
7536 }
7537
7538 std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
7539 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
7540 pm.forEachPeer([&](const avalanche::Peer &peer) {
7541 assert(peer.proof);
7542 uint64_t shortid = compactProofs.getShortID(peer.getProofId());
7543
7544 int added =
7545 shortIdProcessor.matchKnownItem(shortid, peer.proof);
7546
7547 // No collision
7548 if (added >= 0) {
7549 // Because we know the proof, we can determine if our peer
7550 // has it (added = 1) or not (added = 0) and update the
7551 // remote proof status accordingly.
7552 remoteProofsStatus.emplace_back(peer.getProofId(),
7553 added > 0);
7554 }
7555
7556 // In order to properly determine which proof is missing, we
7557 // need to keep scanning for all our proofs.
7558 return true;
7559 });
7560 });
7561
7561 
7562 avalanche::ProofsRequest req;
7563 for (size_t i = 0; i < compactProofs.size(); i++) {
7564 if (shortIdProcessor.getItem(i) == nullptr) {
7565 req.indices.push_back(i);
7566 }
7567 }
7568
7569 m_connman.PushMessage(&pfrom,
7570 msgMaker.Make(NetMsgType::AVAPROOFSREQ, req));
7571
7572 const NodeId nodeid = pfrom.GetId();
7573
7574 // We want to keep a count of how many nodes we successfully requested
7575 // avaproofs from as this is used to determine when we are confident our
7576 // quorum is close enough to the other participants.
7577 m_avalanche->avaproofsSent(nodeid);
7578
7579 // Only save remote proofs from stakers
7580 if (WITH_LOCK(pfrom.cs_avalanche_pubkey,
7581 return pfrom.m_avalanche_pubkey.has_value())) {
7582 m_avalanche->withPeerManager(
7583 [&remoteProofsStatus, nodeid](avalanche::PeerManager &pm) {
7584 for (const auto &[proofid, present] : remoteProofsStatus) {
7585 pm.saveRemoteProof(proofid, nodeid, present);
7586 }
7587 });
7588 }
7589
7590 return;
7591 }
7592
7593 if (msg_type == NetMsgType::AVAPROOFSREQ) {
7594 if (peer->m_proof_relay == nullptr) {
7595 return;
7596 }
7597
7598 avalanche::ProofsRequest proofreq;
7599 vRecv >> proofreq;
7600
7601 auto requestedIndiceIt = proofreq.indices.begin();
7602 uint32_t treeIndice = 0;
7603 peer->m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
7604 if (requestedIndiceIt == proofreq.indices.end()) {
7605 // No more indices to process
7606 return false;
7607 }
7608
7609 if (treeIndice++ == *requestedIndiceIt) {
7610 m_connman.PushMessage(
7611 &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
7612 requestedIndiceIt++;
7613 }
7614
7615 return true;
7616 });
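// Illustrative walk of the loop above: with sorted request indices
// {0, 2} against leaves [A, B, C], the callback sends AVAPROOF(A),
// skips B, sends AVAPROOF(C), and stops early once every requested
// index has been served.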
7617
7618 peer->m_proof_relay->sharedProofs = {};
7619 return;
7620 }
7621
7622 if (msg_type == NetMsgType::GETADDR) {
7623 // This asymmetric behavior for inbound and outbound connections was
7624 // introduced to prevent a fingerprinting attack: an attacker can send
7625 // specific fake addresses to users' AddrMan and later request them by
7626 // sending getaddr messages. Making nodes which are behind NAT and can
7627 // only make outgoing connections ignore the getaddr message mitigates
7628 // the attack.
7629 if (!pfrom.IsInboundConn()) {
7631 "Ignoring \"getaddr\" from %s connection. peer=%d\n",
7632 pfrom.ConnectionTypeAsString(), pfrom.GetId());
7633 return;
7634 }
7635
7636 // Since this must be an inbound connection, SetupAddressRelay will
7637 // never fail.
7638 Assume(SetupAddressRelay(pfrom, *peer));
7639
7640 // Only send one GetAddr response per connection to reduce resource
7641 // waste and discourage addr stamping of INV announcements.
7642 if (peer->m_getaddr_recvd) {
7643 LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
7644 pfrom.GetId());
7645 return;
7646 }
7647 peer->m_getaddr_recvd = true;
7648
7649 peer->m_addrs_to_send.clear();
7650 std::vector<CAddress> vAddr;
7651 const size_t maxAddrToSend = m_opts.max_addr_to_send;
7652 if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
7653 vAddr = m_connman.GetAddresses(maxAddrToSend, MAX_PCT_ADDR_TO_SEND,
7654 /* network */ std::nullopt);
7655 } else {
7656 vAddr = m_connman.GetAddresses(pfrom, maxAddrToSend,
7657 MAX_PCT_ADDR_TO_SEND);
7658 }
7659 for (const CAddress &addr : vAddr) {
7660 PushAddress(*peer, addr);
7661 }
7662 return;
7663 }
7664
7665 if (msg_type == NetMsgType::GETAVAADDR) {
7666 auto now = GetTime<std::chrono::seconds>();
7667 if (now < pfrom.m_nextGetAvaAddr) {
7668 // Prevent a peer from exhausting our resources by spamming
7669 // getavaaddr messages.
7670 return;
7671 }
7672
7673 // Only accept a getavaaddr every GETAVAADDR_INTERVAL at most
7674 pfrom.m_nextGetAvaAddr = now + GETAVAADDR_INTERVAL;
7675 
7676 if (!SetupAddressRelay(pfrom, *peer)) {
7677 LogPrint(BCLog::AVALANCHE,
7678 "Ignoring getavaaddr message from %s peer=%d\n",
7679 pfrom.ConnectionTypeAsString(), pfrom.GetId());
7680 return;
7681 }
7682
7683 auto availabilityScoreComparator = [](const CNode *lhs,
7684 const CNode *rhs) {
7685 double scoreLhs = lhs->getAvailabilityScore();
7686 double scoreRhs = rhs->getAvailabilityScore();
7687
7688 if (scoreLhs != scoreRhs) {
7689 return scoreLhs > scoreRhs;
7690 }
7691
7692 return lhs < rhs;
7693 };
7694
7695 // Get up to MAX_ADDR_TO_SEND addresses of the nodes which are the
7696 // most active in the avalanche network. Account for 0 availability as
7697 // well so we can send addresses even if we did not start polling yet.
7698 std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
7699 availabilityScoreComparator);
7700 m_connman.ForEachNode([&](const CNode *pnode) {
7701 if (!pnode->m_avalanche_enabled ||
7702 pnode->getAvailabilityScore() < 0.) {
7703 return;
7704 }
7705
7706 avaNodes.insert(pnode);
7707 if (avaNodes.size() > m_opts.max_addr_to_send) {
7708 avaNodes.erase(std::prev(avaNodes.end()));
7709 }
7710 });
7711
7712 peer->m_addrs_to_send.clear();
7713 for (const CNode *pnode : avaNodes) {
7714 PushAddress(*peer, pnode->addr);
7715 }
7716
7717 return;
7718 }
7719
7720 if (msg_type == NetMsgType::MEMPOOL) {
7721 if (!(peer->m_our_services & NODE_BLOOM) &&
7722 !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
7723 if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
7724 LogPrint(BCLog::NET,
7725 "mempool request with bloom filters disabled, "
7726 "disconnect peer=%d\n",
7727 pfrom.GetId());
7728 pfrom.fDisconnect = true;
7729 }
7730 return;
7731 }
7732
7733 if (m_connman.OutboundTargetReached(false) &&
7734 !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
7735 if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
7736 LogPrint(BCLog::NET,
7737 "mempool request with bandwidth limit reached, "
7738 "disconnect peer=%d\n",
7739 pfrom.GetId());
7740 pfrom.fDisconnect = true;
7741 }
7742 return;
7743 }
7744
7745 if (auto tx_relay = peer->GetTxRelay()) {
7746 LOCK(tx_relay->m_tx_inventory_mutex);
7747 tx_relay->m_send_mempool = true;
7748 }
7749 return;
7750 }
7751
7752 if (msg_type == NetMsgType::PING) {
7753 if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
7754 uint64_t nonce = 0;
7755 vRecv >> nonce;
7756 // Echo the message back with the nonce. This allows for two useful
7757 // features:
7758 //
7759 // 1) A remote node can quickly check if the connection is
7760 // operational.
7761 // 2) Remote nodes can measure the latency of the network thread. If
7762 // this node is overloaded it won't respond to pings quickly and the
7763 // remote node can avoid sending us more work, like chain download
7764 // requests.
7765 //
7766 // The nonce stops the remote getting confused between different
7767 // pings: without it, if the remote node sends a ping once per
7768 // second and this node takes 5 seconds to respond to each, the 5th
7769 // ping the remote sends would appear to return very quickly.
7770 m_connman.PushMessage(&pfrom,
7771 msgMaker.Make(NetMsgType::PONG, nonce));
7772 }
7773 return;
7774 }
7775
7776 if (msg_type == NetMsgType::PONG) {
7777 const auto ping_end = time_received;
7778 uint64_t nonce = 0;
7779 size_t nAvail = vRecv.in_avail();
7780 bool bPingFinished = false;
7781 std::string sProblem;
7782
7783 if (nAvail >= sizeof(nonce)) {
7784 vRecv >> nonce;
7785
7786 // Only process pong message if there is an outstanding ping (old
7787 // ping without nonce should never pong)
7788 if (peer->m_ping_nonce_sent != 0) {
7789 if (nonce == peer->m_ping_nonce_sent) {
7790 // Matching pong received, this ping is no longer
7791 // outstanding
7792 bPingFinished = true;
7793 const auto ping_time = ping_end - peer->m_ping_start.load();
7794 if (ping_time.count() >= 0) {
7795 // Let connman know about this successful ping-pong
7796 pfrom.PongReceived(ping_time);
7797 } else {
7798 // This should never happen
7799 sProblem = "Timing mishap";
7800 }
7801 } else {
7802 // Nonce mismatches are normal when pings are overlapping
7803 sProblem = "Nonce mismatch";
7804 if (nonce == 0) {
7805 // This is most likely a bug in another implementation
7806 // somewhere; cancel this ping
7807 bPingFinished = true;
7808 sProblem = "Nonce zero";
7809 }
7810 }
7811 } else {
7812 sProblem = "Unsolicited pong without ping";
7813 }
7814 } else {
7815 // This is most likely a bug in another implementation somewhere;
7816 // cancel this ping
7817 bPingFinished = true;
7818 sProblem = "Short payload";
7819 }
7820
7821 if (!(sProblem.empty())) {
7822 LogPrint(BCLog::NET,
7823 "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
7824 pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
7825 nAvail);
7826 }
7827 if (bPingFinished) {
7828 peer->m_ping_nonce_sent = 0;
7829 }
7830 return;
7831 }
7832
7833 if (msg_type == NetMsgType::FILTERLOAD) {
7834 if (!(peer->m_our_services & NODE_BLOOM)) {
7835 LogPrint(BCLog::NET,
7836 "filterload received despite not offering bloom services "
7837 "from peer=%d; disconnecting\n",
7838 pfrom.GetId());
7839 pfrom.fDisconnect = true;
7840 return;
7841 }
7842 CBloomFilter filter;
7843 vRecv >> filter;
7844
7845 if (!filter.IsWithinSizeConstraints()) {
7846 // There is no excuse for sending a too-large filter
7847 Misbehaving(*peer, "too-large bloom filter");
7848 } else if (auto tx_relay = peer->GetTxRelay()) {
7849 {
7850 LOCK(tx_relay->m_bloom_filter_mutex);
7851 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
7852 tx_relay->m_relay_txs = true;
7853 }
7854 pfrom.m_bloom_filter_loaded = true;
7855 }
7856 return;
7857 }
7858
7859 if (msg_type == NetMsgType::FILTERADD) {
7860 if (!(peer->m_our_services & NODE_BLOOM)) {
7861 LogPrint(BCLog::NET,
7862 "filteradd received despite not offering bloom services "
7863 "from peer=%d; disconnecting\n",
7864 pfrom.GetId());
7865 pfrom.fDisconnect = true;
7866 return;
7867 }
7868 std::vector<uint8_t> vData;
7869 vRecv >> vData;
7870
7871 // Nodes must NEVER send a data item > 520 bytes (the max size for a
7872 // script data object, and thus, the maximum size any matched object can
7873 // have) in a filteradd message.
7874 bool bad = false;
7875 if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
7876 bad = true;
7877 } else if (auto tx_relay = peer->GetTxRelay()) {
7878 LOCK(tx_relay->m_bloom_filter_mutex);
7879 if (tx_relay->m_bloom_filter) {
7880 tx_relay->m_bloom_filter->insert(vData);
7881 } else {
7882 bad = true;
7883 }
7884 }
7885 if (bad) {
7886 // The structure of this code doesn't really allow for a good error
7887 // code. We'll go generic.
7888 Misbehaving(*peer, "bad filteradd message");
7889 }
7890 return;
7891 }
7892
7893 if (msg_type == NetMsgType::FILTERCLEAR) {
7894 if (!(peer->m_our_services & NODE_BLOOM)) {
7895 LogPrint(BCLog::NET,
7896 "filterclear received despite not offering bloom services "
7897 "from peer=%d; disconnecting\n",
7898 pfrom.GetId());
7899 pfrom.fDisconnect = true;
7900 return;
7901 }
7902 auto tx_relay = peer->GetTxRelay();
7903 if (!tx_relay) {
7904 return;
7905 }
7906
7907 {
7908 LOCK(tx_relay->m_bloom_filter_mutex);
7909 tx_relay->m_bloom_filter = nullptr;
7910 tx_relay->m_relay_txs = true;
7911 }
7912 pfrom.m_bloom_filter_loaded = false;
7913 pfrom.m_relays_txs = true;
7914 return;
7915 }
7916
7917 if (msg_type == NetMsgType::FEEFILTER) {
7918 Amount newFeeFilter = Amount::zero();
7919 vRecv >> newFeeFilter;
7920 if (MoneyRange(newFeeFilter)) {
7921 if (auto tx_relay = peer->GetTxRelay()) {
7922 tx_relay->m_fee_filter_received = newFeeFilter;
7923 }
7924 LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
7925 CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
7926 }
7927 return;
7928 }
7929
7930 if (msg_type == NetMsgType::GETCFILTERS) {
7931 ProcessGetCFilters(pfrom, *peer, vRecv);
7932 return;
7933 }
7934
7935 if (msg_type == NetMsgType::GETCFHEADERS) {
7936 ProcessGetCFHeaders(pfrom, *peer, vRecv);
7937 return;
7938 }
7939
7940 if (msg_type == NetMsgType::GETCFCHECKPT) {
7941 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
7942 return;
7943 }
7944
7945 if (msg_type == NetMsgType::NOTFOUND) {
7946 std::vector<CInv> vInv;
7947 vRecv >> vInv;
7948 // A peer might send up to 1 notfound per getdata request, but no more
7949 if (vInv.size() <= PROOF_REQUEST_PARAMS.max_peer_announcements +
7950 TX_REQUEST_PARAMS.max_peer_announcements +
7951 MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
7952 for (CInv &inv : vInv) {
7953 if (inv.IsMsgTx()) {
7954 // If we receive a NOTFOUND message for a tx we requested,
7955 // mark the announcement for it as completed in
7956 // InvRequestTracker.
7957 LOCK(::cs_main);
7958 m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
7959 continue;
7960 }
7961 if (inv.IsMsgProof()) {
7962 if (!m_avalanche) {
7963 continue;
7964 }
7965 LOCK(cs_proofrequest);
7966 m_proofrequest.ReceivedResponse(
7967 pfrom.GetId(), avalanche::ProofId(inv.hash));
7968 }
7969 }
7970 }
7971 return;
7972 }
7973
7974 // Ignore unknown commands for extensibility
7975 LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
7976 SanitizeString(msg_type), pfrom.GetId());
7977 return;
7978}
7979
7980bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
7981 {
7982 LOCK(peer.m_misbehavior_mutex);
7983
7984 // There's nothing to do if the m_should_discourage flag isn't set
7985 if (!peer.m_should_discourage) {
7986 return false;
7987 }
7988
7989 peer.m_should_discourage = false;
7990 } // peer.m_misbehavior_mutex
7991
7992 if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
7993 // We never disconnect or discourage peers for bad behavior if they have
7994 // NetPermissionFlags::NoBan permission
7995 LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
7996 return false;
7997 }
7998
7999 if (pnode.IsManualConn()) {
8000 // We never disconnect or discourage manual peers for bad behavior
8001 LogPrintf("Warning: not punishing manually connected peer %d!\n",
8002 peer.m_id);
8003 return false;
8004 }
8005
8006 if (pnode.addr.IsLocal()) {
8007 // We disconnect local peers for bad behavior but don't discourage
8008 // (since that would discourage all peers on the same local address)
8009 LogPrint(BCLog::NET,
8010 "Warning: disconnecting but not discouraging %s peer %d!\n",
8011 pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
8012 pnode.fDisconnect = true;
8013 return true;
8014 }
8015
8016 // Normal case: Disconnect the peer and discourage all nodes sharing the
8017 // address
8018 LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n",
8019 peer.m_id);
8020 if (m_banman) {
8021 m_banman->Discourage(pnode.addr);
8022 }
8023 m_connman.DisconnectNode(pnode.addr);
8024 return true;
8025}
8026
8027bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
8028 std::atomic<bool> &interruptMsgProc) {
8029 AssertLockHeld(g_msgproc_mutex);
8030
8031 //
8032 // Message format
8033 // (4) message start
8034 // (12) command
8035 // (4) size
8036 // (4) checksum
8037 // (x) data
8038 //
8039
8040 PeerRef peer = GetPeerRef(pfrom->GetId());
8041 if (peer == nullptr) {
8042 return false;
8043 }
8044
8045 {
8046 LOCK(peer->m_getdata_requests_mutex);
8047 if (!peer->m_getdata_requests.empty()) {
8048 ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
8049 }
8050 }
8051
8052 const bool processed_orphan = ProcessOrphanTx(config, *peer);
8053
8054 if (pfrom->fDisconnect) {
8055 return false;
8056 }
8057
8058 if (processed_orphan) {
8059 return true;
8060 }
8061
8062 // This maintains the order of responses and prevents m_getdata_requests
8063 // from growing unbounded.
8064 {
8065 LOCK(peer->m_getdata_requests_mutex);
8066 if (!peer->m_getdata_requests.empty()) {
8067 return true;
8068 }
8069 }
8070
8071 // Don't bother if send buffer is too full to respond anyway
8072 if (pfrom->fPauseSend) {
8073 return false;
8074 }
8075
8076 auto poll_result{pfrom->PollMessage()};
8077 if (!poll_result) {
8078 // No message to process
8079 return false;
8080 }
8081
8082 CNetMessage &msg{poll_result->first};
8083 bool fMoreWork = poll_result->second;
8084
8085 TRACE6(net, inbound_message, pfrom->GetId(), pfrom->m_addr_name.c_str(),
8086 pfrom->ConnectionTypeAsString().c_str(), msg.m_type.c_str(),
8087 msg.m_recv.size(), msg.m_recv.data());
8088
8089 if (m_opts.capture_messages) {
8090 CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv),
8091 /*is_incoming=*/true);
8092 }
8093
8094 msg.SetVersion(pfrom->GetCommonVersion());
8095
8096 // Check network magic
8097 if (!msg.m_valid_netmagic) {
8098 LogPrint(BCLog::NET,
8099 "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
8100 SanitizeString(msg.m_type), pfrom->GetId());
8101
8102 // Make sure we discourage the peer this message came from for some time.
8103 if (m_banman) {
8104 m_banman->Discourage(pfrom->addr);
8105 }
8106 m_connman.DisconnectNode(pfrom->addr);
8107
8108 pfrom->fDisconnect = true;
8109 return false;
8110 }
8111
8112 // Check header
8113 if (!msg.m_valid_header) {
8114 LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
8115 SanitizeString(msg.m_type), pfrom->GetId());
8116 return fMoreWork;
8117 }
8118
8119 // Checksum
8120 CDataStream &vRecv = msg.m_recv;
8121 if (!msg.m_valid_checksum) {
8122 LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
8123 __func__, SanitizeString(msg.m_type), msg.m_message_size,
8124 pfrom->GetId());
8125 if (m_banman) {
8126 m_banman->Discourage(pfrom->addr);
8127 }
8128 m_connman.DisconnectNode(pfrom->addr);
8129 return fMoreWork;
8130 }
8131
8132 try {
8133 ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
8134 interruptMsgProc);
8135 if (interruptMsgProc) {
8136 return false;
8137 }
8138
8139 {
8140 LOCK(peer->m_getdata_requests_mutex);
8141 if (!peer->m_getdata_requests.empty()) {
8142 fMoreWork = true;
8143 }
8144 }
8145 // Does this peer have an orphan ready to reconsider?
8146 // (Note: we may have provided a parent for an orphan provided by
8147 // another peer that was already processed; in that case, the extra work
8148 // may not be noticed, possibly resulting in an unnecessary 100ms delay)
8149 if (m_mempool.withOrphanage([&peer](TxOrphanage &orphanage) {
8150 return orphanage.HaveTxToReconsider(peer->m_id);
8151 })) {
8152 fMoreWork = true;
8153 }
8154 } catch (const std::exception &e) {
8155 LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
8156 __func__, SanitizeString(msg.m_type), msg.m_message_size,
8157 e.what(), typeid(e).name());
8158 } catch (...) {
8159 LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
8160 __func__, SanitizeString(msg.m_type), msg.m_message_size);
8161 }
8162
8163 return fMoreWork;
8164}
8165
8166void PeerManagerImpl::ConsiderEviction(CNode &pto, Peer &peer,
8167 std::chrono::seconds time_in_seconds) {
8168 AssertLockHeld(cs_main);
8169
8170 CNodeState &state = *State(pto.GetId());
8171 const CNetMsgMaker msgMaker(pto.GetCommonVersion());
8172
8173 if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
8174 state.fSyncStarted) {
8175 // This is an outbound peer subject to disconnection if they don't
8176 // announce a block with as much work as the current tip within
8177 // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
8178 // chain has more work than ours, we should sync to it, unless it's
8179 // invalid, in which case we should find that out and disconnect from
8180 // them elsewhere).
8181 if (state.pindexBestKnownBlock != nullptr &&
8182 state.pindexBestKnownBlock->nChainWork >=
8183 m_chainman.ActiveChain().Tip()->nChainWork) {
8184 if (state.m_chain_sync.m_timeout != 0s) {
8185 state.m_chain_sync.m_timeout = 0s;
8186 state.m_chain_sync.m_work_header = nullptr;
8187 state.m_chain_sync.m_sent_getheaders = false;
8188 }
8189 } else if (state.m_chain_sync.m_timeout == 0s ||
8190 (state.m_chain_sync.m_work_header != nullptr &&
8191 state.pindexBestKnownBlock != nullptr &&
8192 state.pindexBestKnownBlock->nChainWork >=
8193 state.m_chain_sync.m_work_header->nChainWork)) {
8194 // Our best block known by this peer is behind our tip, and we're
8195 // either noticing that for the first time, OR this peer was able to
8196 // catch up to some earlier point where we checked against our tip.
8197 // Either way, set a new timeout based on current tip.
8198 state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
8199 state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
8200 state.m_chain_sync.m_sent_getheaders = false;
8201 } else if (state.m_chain_sync.m_timeout > 0s &&
8202 time_in_seconds > state.m_chain_sync.m_timeout) {
8203 // No evidence yet that our peer has synced to a chain with work
8204 // equal to that of our tip, when we first detected it was behind.
8205 // Send a single getheaders message to give the peer a chance to
8206 // update us.
8207 if (state.m_chain_sync.m_sent_getheaders) {
8208 // They've run out of time to catch up!
8209 LogPrintf(
8210 "Disconnecting outbound peer %d for old chain, best known "
8211 "block = %s\n",
8212 pto.GetId(),
8213 state.pindexBestKnownBlock != nullptr
8214 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
8215 : "<none>");
8216 pto.fDisconnect = true;
8217 } else {
8218 assert(state.m_chain_sync.m_work_header);
8219 // Here, we assume that the getheaders message goes out,
8220 // because it'll either go out or be skipped because of a
8221 // getheaders in-flight already, in which case the peer should
8222 // still respond to us with a sufficiently high work chain tip.
8223 MaybeSendGetHeaders(
8224 pto, GetLocator(state.m_chain_sync.m_work_header->pprev),
8225 peer);
8226 LogPrint(
8227 BCLog::NET,
8228 "sending getheaders to outbound peer=%d to verify chain "
8229 "work (current best known block:%s, benchmark blockhash: "
8230 "%s)\n",
8231 pto.GetId(),
8232 state.pindexBestKnownBlock != nullptr
8233 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
8234 : "<none>",
8235 state.m_chain_sync.m_work_header->GetBlockHash()
8236 .ToString());
8237 state.m_chain_sync.m_sent_getheaders = true;
8238 // Bump the timeout to allow a response, which could clear the
8239 // timeout (if the response shows the peer has synced), reset
8240 // the timeout (if the peer syncs to the required work but not
8241 // to our tip), or result in disconnect (if we advance to the
8242 // timeout and pindexBestKnownBlock has not sufficiently
8243 // progressed)
8244 state.m_chain_sync.m_timeout =
8245 time_in_seconds + HEADERS_RESPONSE_TIME;
8246 }
8247 }
8248 }
8249}
8250
8251void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
8252 // If we have any extra block-relay-only peers, disconnect the youngest
8253 // unless it's given us a block -- in which case, compare with the
8254 // second-youngest, and out of those two, disconnect the peer who least
8255 // recently gave us a block.
8256 // The youngest block-relay-only peer would be the extra peer we connected
8257 // to temporarily in order to sync our tip; see net.cpp.
8258 // Note that we use higher nodeid as a measure for most recent connection.
8259 if (m_connman.GetExtraBlockRelayCount() > 0) {
8260 std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
8261 next_youngest_peer{-1, 0};
8262
8263 m_connman.ForEachNode([&](CNode *pnode) {
8264 if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) {
8265 return;
8266 }
8267 if (pnode->GetId() > youngest_peer.first) {
8268 next_youngest_peer = youngest_peer;
8269 youngest_peer.first = pnode->GetId();
8270 youngest_peer.second = pnode->m_last_block_time;
8271 }
8272 });
8273
8274 NodeId to_disconnect = youngest_peer.first;
8275 if (youngest_peer.second > next_youngest_peer.second) {
8276 // Our newest block-relay-only peer gave us a block more recently;
8277 // disconnect our second youngest.
8278 to_disconnect = next_youngest_peer.first;
8279 }
8280
8281 m_connman.ForNode(
8282 to_disconnect,
8283 [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
8284 AssertLockHeld(::cs_main);
8285 // Make sure we're not getting a block right now, and that we've
8286 // been connected long enough for this eviction to happen at
8287 // all. Note that we only request blocks from a peer if we learn
8288 // of a valid headers chain with at least as much work as our
8289 // tip.
8290 CNodeState *node_state = State(pnode->GetId());
8291 if (node_state == nullptr ||
8292 (now - pnode->m_connected >= MINIMUM_CONNECT_TIME &&
8293 node_state->vBlocksInFlight.empty())) {
8294 pnode->fDisconnect = true;
8295 LogPrint(BCLog::NET,
8296 "disconnecting extra block-relay-only peer=%d "
8297 "(last block received at time %d)\n",
8298 pnode->GetId(),
8299 count_seconds(pnode->m_last_block_time));
8300 return true;
8301 } else {
8302 LogPrint(
8303 BCLog::NET,
8304 "keeping block-relay-only peer=%d chosen for eviction "
8305 "(connect time: %d, blocks_in_flight: %d)\n",
8306 pnode->GetId(), count_seconds(pnode->m_connected),
8307 node_state->vBlocksInFlight.size());
8308 }
8309 return false;
8310 });
8311 }
8312
8313 // Check whether we have too many OUTBOUND_FULL_RELAY peers
8314 if (m_connman.GetExtraFullOutboundCount() <= 0) {
8315 return;
8316 }
8317
8318 // If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
8319 // Pick the OUTBOUND_FULL_RELAY peer that least recently announced us a new
8320 // block, with ties broken by choosing the more recent connection (higher
8321 // node id)
8322 NodeId worst_peer = -1;
8323 int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
8324
8325 m_connman.ForEachNode([&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
8326 ::cs_main) {
8327 AssertLockHeld(::cs_main);
8328
8329 // Only consider OUTBOUND_FULL_RELAY peers that are not already marked
8330 // for disconnection
8331 if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) {
8332 return;
8333 }
8334 CNodeState *state = State(pnode->GetId());
8335 if (state == nullptr) {
8336 // shouldn't be possible, but just in case
8337 return;
8338 }
8339 // Don't evict our protected peers
8340 if (state->m_chain_sync.m_protect) {
8341 return;
8342 }
8343 if (state->m_last_block_announcement < oldest_block_announcement ||
8344 (state->m_last_block_announcement == oldest_block_announcement &&
8345 pnode->GetId() > worst_peer)) {
8346 worst_peer = pnode->GetId();
8347 oldest_block_announcement = state->m_last_block_announcement;
8348 }
8349 });
8350
8351 if (worst_peer == -1) {
8352 return;
8353 }
8354
8355 bool disconnected = m_connman.ForNode(
8356 worst_peer, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
8357 AssertLockHeld(::cs_main);
8358
8359 // Only disconnect a peer that has been connected to us for some
8360 // reasonable fraction of our check-frequency, to give it time for
8361 // new information to have arrived. Also don't disconnect any peer
8362 // we're trying to download a block from.
8363 CNodeState &state = *State(pnode->GetId());
8364 if (now - pnode->m_connected > MINIMUM_CONNECT_TIME &&
8365 state.vBlocksInFlight.empty()) {
8366 LogPrint(BCLog::NET,
8367 "disconnecting extra outbound peer=%d (last block "
8368 "announcement received at time %d)\n",
8369 pnode->GetId(), oldest_block_announcement);
8370 pnode->fDisconnect = true;
8371 return true;
8372 } else {
8373 LogPrint(BCLog::NET,
8374 "keeping outbound peer=%d chosen for eviction "
8375 "(connect time: %d, blocks_in_flight: %d)\n",
8376 pnode->GetId(), count_seconds(pnode->m_connected),
8377 state.vBlocksInFlight.size());
8378 return false;
8379 }
8380 });
8381
8382 if (disconnected) {
8383 // If we disconnected an extra peer, that means we successfully
8384 // connected to at least one peer after the last time we detected a
8385 // stale tip. Don't try any more extra peers until we next detect a
8386 // stale tip, to limit the load we put on the network from these extra
8387 // connections.
8388 m_connman.SetTryNewOutboundPeer(false);
8389 }
8390}
8391
8392void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
8393 LOCK(cs_main);
8394
8395 auto now{GetTime<std::chrono::seconds>()};
8396
8397 EvictExtraOutboundPeers(now);
8398
8399 if (now > m_stale_tip_check_time) {
8400 // Check whether our tip is stale, and if so, allow using an extra
8401 // outbound peer.
8402 if (!m_chainman.m_blockman.LoadingBlocks() &&
8403 m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() &&
8404 TipMayBeStale()) {
8405 LogPrintf("Potential stale tip detected, will try using extra "
8406 "outbound peer (last tip update: %d seconds ago)\n",
8407 count_seconds(now - m_last_tip_update.load()));
8408 m_connman.SetTryNewOutboundPeer(true);
8409 } else if (m_connman.GetTryNewOutboundPeer()) {
8410 m_connman.SetTryNewOutboundPeer(false);
8411 }
8412 m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
8413 }
8414
8415 if (!m_initial_sync_finished && CanDirectFetch()) {
8416 m_connman.StartExtraBlockRelayPeers();
8417 m_initial_sync_finished = true;
8418 }
8419}
8420
8421void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
8422 std::chrono::microseconds now) {
8423 if (m_connman.ShouldRunInactivityChecks(
8424 node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
8425 peer.m_ping_nonce_sent &&
8426 now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) {
8427 // The ping timeout is using mocktime. To disable the check during
8428 // testing, increase -peertimeout.
8429 LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n",
8430 0.000001 * count_microseconds(now - peer.m_ping_start.load()),
8431 peer.m_id);
8432 node_to.fDisconnect = true;
8433 return;
8434 }
8435
8436 const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
8437 bool pingSend = false;
8438
8439 if (peer.m_ping_queued) {
8440 // RPC ping request by user
8441 pingSend = true;
8442 }
8443
8444 if (peer.m_ping_nonce_sent == 0 &&
8445 now > peer.m_ping_start.load() + PING_INTERVAL) {
8446 // Ping automatically sent as a latency probe & keepalive.
8447 pingSend = true;
8448 }
8449
8450 if (pingSend) {
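// A nonce of zero is reserved to mean "no ping in flight" (see the
// m_ping_nonce_sent checks above), so keep drawing until nonzero.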
8451 uint64_t nonce;
8452 do {
8453 nonce = FastRandomContext().rand64();
8454 } while (nonce == 0);
8455 peer.m_ping_queued = false;
8456 peer.m_ping_start = now;
8457 if (node_to.GetCommonVersion() > BIP0031_VERSION) {
8458 peer.m_ping_nonce_sent = nonce;
8459 m_connman.PushMessage(&node_to,
8460 msgMaker.Make(NetMsgType::PING, nonce));
8461 } else {
8462 // Peer is too old to support ping command with nonce, pong will
8463 // never arrive.
8464 peer.m_ping_nonce_sent = 0;
8465 m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
8466 }
8467 }
8468}
8469
8470void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
8471 std::chrono::microseconds current_time) {
8472 // Nothing to do for non-address-relay peers
8473 if (!peer.m_addr_relay_enabled) {
8474 return;
8475 }
8476
8477 LOCK(peer.m_addr_send_times_mutex);
8478 if (fListen && !m_chainman.IsInitialBlockDownload() &&
8479 peer.m_next_local_addr_send < current_time) {
8480 // If we've sent before, clear the bloom filter for the peer, so
8481 // that our self-announcement will actually go out. This might
8482 // be unnecessary if the bloom filter has already rolled over
8483 // since our last self-announcement, but there is only a small
8484 // bandwidth cost that we can incur by doing this (which happens
8485 // once a day on average).
8486 if (peer.m_next_local_addr_send != 0us) {
8487 peer.m_addr_known->reset();
8488 }
8489 if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
8490 CAddress local_addr{*local_service, peer.m_our_services,
8491 Now<NodeSeconds>()};
8492 PushAddress(peer, local_addr);
8493 }
8494 peer.m_next_local_addr_send =
8495 current_time +
8496 m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
8497 }
8498
8499 // We sent an `addr` message to this peer recently. Nothing more to do.
8500 if (current_time <= peer.m_next_addr_send) {
8501 return;
8502 }
8503
8504 peer.m_next_addr_send =
8505 current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);
8506
8507 const size_t max_addr_to_send = m_opts.max_addr_to_send;
8508 if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
8509 // Should be impossible since we always check size before adding to
8510 // m_addrs_to_send. Recover by trimming the vector.
8511 peer.m_addrs_to_send.resize(max_addr_to_send);
8512 }
8513
8514 // Remove addr records that the peer already knows about, and add new
8515 // addrs to the m_addr_known filter on the same pass.
8516 auto addr_already_known =
8517 [&peer](const CAddress &addr)
8518 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
8519 bool ret = peer.m_addr_known->contains(addr.GetKey());
8520 if (!ret) {
8521 peer.m_addr_known->insert(addr.GetKey());
8522 }
8523 return ret;
8524 };
8525 peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
8526 peer.m_addrs_to_send.end(),
8527 addr_already_known),
8528 peer.m_addrs_to_send.end());
8529
8530 // No addr messages to send
8531 if (peer.m_addrs_to_send.empty()) {
8532 return;
8533 }
8534
8535 const char *msg_type;
8536 int make_flags;
8537 if (peer.m_wants_addrv2) {
8538 msg_type = NetMsgType::ADDRV2;
8539 make_flags = ADDRV2_FORMAT;
8540 } else {
8541 msg_type = NetMsgType::ADDR;
8542 make_flags = 0;
8543 }
8544 m_connman.PushMessage(
8545 &node, CNetMsgMaker(node.GetCommonVersion())
8546 .Make(make_flags, msg_type, peer.m_addrs_to_send));
8547 peer.m_addrs_to_send.clear();
8548
8549 // we only send the big addr message once
8550 if (peer.m_addrs_to_send.capacity() > 40) {
8551 peer.m_addrs_to_send.shrink_to_fit();
8552 }
8553}
8554
8555void PeerManagerImpl::MaybeSendSendHeaders(CNode &node, Peer &peer) {
8556 // Delay sending SENDHEADERS (BIP 130) until we're done with an
8557 // initial-headers-sync with this peer. Receiving headers announcements for
8558 // new blocks while trying to sync their headers chain is problematic,
8559 // because of the state tracking done.
8560 if (!peer.m_sent_sendheaders &&
8561 node.GetCommonVersion() >= SENDHEADERS_VERSION) {
8562 LOCK(cs_main);
8563 CNodeState &state = *State(node.GetId());
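// Note: requiring the peer's best known block to exceed our minimum
// chain work before sending SENDHEADERS appears to act as a proxy for
// "initial headers sync with this peer is done" (see comment above).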
8564 if (state.pindexBestKnownBlock != nullptr &&
8565 state.pindexBestKnownBlock->nChainWork >
8566 m_chainman.MinimumChainWork()) {
8567 // Tell our peer we prefer to receive headers rather than inv's
8568 // We send this to non-NODE NETWORK peers as well, because even
8569 // non-NODE NETWORK peers can announce blocks (such as pruning
8570 // nodes)
8571 m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion())
8572 .Make(NetMsgType::SENDHEADERS));
8573 peer.m_sent_sendheaders = true;
8574 }
8575 }
8576}
8577
8578void PeerManagerImpl::MaybeSendFeefilter(
8579 CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
8580 if (m_opts.ignore_incoming_txs) {
8581 return;
8582 }
8583 if (pto.GetCommonVersion() < FEEFILTER_VERSION) {
8584 return;
8585 }
8586 // peers with the forcerelay permission should not filter txs to us
8587 if (pto.HasPermission(NetPermissionFlags::ForceRelay)) {
8588 return;
8589 }
8590 // Don't send feefilter messages to outbound block-relay-only peers since
8591 // they should never announce transactions to us, regardless of feefilter
8592 // state.
8593 if (pto.IsBlockOnlyConn()) {
8594 return;
8595 }
8596
8597 Amount currentFilter = m_mempool.GetMinFee().GetFeePerK();
8598
8599 if (m_chainman.IsInitialBlockDownload()) {
8600 // Received tx-inv messages are discarded when the active
8601 // chainstate is in IBD, so tell the peer to not send them.
8602 currentFilter = MAX_MONEY;
8603 } else {
8604 static const Amount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
8605 if (peer.m_fee_filter_sent == MAX_FILTER) {
8606 // Send the current filter if we sent MAX_FILTER previously
8607 // and made it out of IBD.
8608 peer.m_next_send_feefilter = 0us;
8609 }
8610 }
8611 if (current_time > peer.m_next_send_feefilter) {
8612 Amount filterToSend = m_fee_filter_rounder.round(currentFilter);
8613 // We always have a fee filter of at least the min relay fee
8614 filterToSend =
8615 std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
8616 if (filterToSend != peer.m_fee_filter_sent) {
8617 m_connman.PushMessage(
8618 &pto, CNetMsgMaker(pto.GetCommonVersion())
8619 .Make(NetMsgType::FEEFILTER, filterToSend));
8620 peer.m_fee_filter_sent = filterToSend;
8621 }
8622 peer.m_next_send_feefilter =
8623 current_time +
8624 m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
8625 }
8626 // If the fee filter has changed substantially and it's still more than
8627 // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
8628 // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
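// Worked example: with m_fee_filter_sent at 1000 sat/kB, this early
// rebroadcast only triggers when the new filter drops below 750 or
// rises above ~1333 sat/kB (outside the [3/4, 4/3] band checked below).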
8629 else if (current_time + MAX_FEEFILTER_CHANGE_DELAY <
8630 peer.m_next_send_feefilter &&
8631 (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
8632 currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
8633 peer.m_next_send_feefilter =
8634 current_time +
8635 FastRandomContext().randrange<std::chrono::microseconds>(
8636 MAX_FEEFILTER_CHANGE_DELAY);
8637 }
8638}
8639
8640namespace {
8641class CompareInvMempoolOrder {
8642 CTxMemPool *mp;
8643
8644public:
8645 explicit CompareInvMempoolOrder(CTxMemPool *_mempool) : mp(_mempool) {}
8646
8647 bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
8648 /**
8649 * As std::make_heap produces a max-heap, we want the entries which
8650 * are topologically earlier to sort later.
8651 */
8652 return mp->CompareTopologically(*b, *a);
8653 }
8654};
8655} // namespace
8656
8657bool PeerManagerImpl::RejectIncomingTxs(const CNode &peer) const {
8658 // block-relay-only peers may never send txs to us
8659 if (peer.IsBlockOnlyConn()) {
8660 return true;
8661 }
8662 if (peer.IsFeelerConn()) {
8663 return true;
8664 }
8665 // In -blocksonly mode, peers need the 'relay' permission to send txs to us
8666 if (m_opts.ignore_incoming_txs &&
8667 !peer.HasPermission(NetPermissionFlags::Relay)) {
8668 return true;
8669 }
8670 return false;
8671}
8672
8673bool PeerManagerImpl::SetupAddressRelay(const CNode &node, Peer &peer) {
8674 // We don't participate in addr relay with outbound block-relay-only
8675 // connections to prevent providing adversaries with the additional
8676 // information of addr traffic to infer the link.
8677 if (node.IsBlockOnlyConn()) {
8678 return false;
8679 }
8680
8681 if (!peer.m_addr_relay_enabled.exchange(true)) {
8682 // During version message processing (non-block-relay-only outbound
8683 // peers) or on first addr-related message we have received (inbound
8684 // peers), initialize m_addr_known.
8685 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
8686 }
8687
8688 return true;
8689}
8690
8691bool PeerManagerImpl::SendMessages(const Config &config, CNode *pto) {
8692 AssertLockHeld(g_msgproc_mutex);
8693
8694 PeerRef peer = GetPeerRef(pto->GetId());
8695 if (!peer) {
8696 return false;
8697 }
8698 const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
8699
8700 // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
8701 // disconnect misbehaving peers even before the version handshake is
8702 // complete.
8703 if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
8704 return true;
8705 }
8706
8707 // Don't send anything until the version handshake is complete
8708 if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
8709 return true;
8710 }
8711
8712 // If we get here, the outgoing message serialization version is set and
8713 // can't change.
8714 const CNetMsgMaker msgMaker(pto->GetCommonVersion());
8715
8716 const auto current_time{GetTime<std::chrono::microseconds>()};
8717
8718 if (pto->IsAddrFetchConn() &&
8719 current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
8720 LogPrint(BCLog::NET,
8721 "addrfetch connection timeout; disconnecting peer=%d\n",
8722 pto->GetId());
8723 pto->fDisconnect = true;
8724 return true;
8725 }
8726
8727 MaybeSendPing(*pto, *peer, current_time);
8728
8729 // MaybeSendPing may have marked peer for disconnection
8730 if (pto->fDisconnect) {
8731 return true;
8732 }
8733
8734 bool sync_blocks_and_headers_from_peer = false;
8735
8736 MaybeSendAddr(*pto, *peer, current_time);
8737
8738 MaybeSendSendHeaders(*pto, *peer);
8739
8740 {
8741 LOCK(cs_main);
8742
8743 CNodeState &state = *State(pto->GetId());
8744
8745 // Start block sync
8746 if (m_chainman.m_best_header == nullptr) {
8747 m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
8748 }
8749
8750 // Determine whether we might try initial headers sync or parallel
8751 // block download from this peer -- this mostly affects behavior while
8752 // in IBD (once out of IBD, we sync from all peers).
8753 if (state.fPreferredDownload) {
8754 sync_blocks_and_headers_from_peer = true;
8755 } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
8756 // Typically this is an inbound peer. If we don't have any outbound
8757 // peers, or if we aren't downloading any blocks from such peers,
8758 // then allow block downloads from this peer, too.
8759 // We prefer downloading blocks from outbound peers to avoid
8760 // putting undue load on (say) some home user who is just making
8761 // outbound connections to the network, but if our only source of
8762 // the latest blocks is from an inbound peer, we have to be sure to
8763 // eventually download it (and not just wait indefinitely for an
8764 // outbound peer to have it).
8765 if (m_num_preferred_download_peers == 0 ||
8766 mapBlocksInFlight.empty()) {
8767 sync_blocks_and_headers_from_peer = true;
8768 }
8769 }
8770
8771 if (!state.fSyncStarted && CanServeBlocks(*peer) &&
8772 !m_chainman.m_blockman.LoadingBlocks()) {
8773 // Only actively request headers from a single peer, unless we're
8774 // close to today.
8775 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
8776 m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
8777 const CBlockIndex *pindexStart = m_chainman.m_best_header;
8778 /**
8779 * If possible, start at the block preceding the currently best
8780 * known header. This ensures that we always get a non-empty list
8781 * of headers back as long as the peer is up to date. With a
8782 * non-empty response, we can initialise the peer's known best
8783 * block. This wouldn't be possible if we requested starting at
8784 * m_chainman.m_best_header and got back an empty response.
8785 */
8786 if (pindexStart->pprev) {
8787 pindexStart = pindexStart->pprev;
8788 }
8789 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
8790 LogPrint(
8791 BCLog::NET,
8792 "initial getheaders (%d) to peer=%d (startheight:%d)\n",
8793 pindexStart->nHeight, pto->GetId(),
8794 peer->m_starting_height);
8795
8796 state.fSyncStarted = true;
8797 peer->m_headers_sync_timeout =
8798 current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
8799 (
8800 // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to
8801 // microseconds before scaling to maintain precision
8802 std::chrono::microseconds{
8803 HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
8804 Ticks<std::chrono::seconds>(
8805 GetAdjustedTime() -
8806 m_chainman.m_best_header->Time()) /
8807 consensusParams.nPowTargetSpacing);
8808 nSyncStarted++;
8809 }
8810 }
8811 }
8812
8813 //
8814 // Try sending block announcements via headers
8815 //
8816 {
8817 // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
8818 // hashes we're relaying, and our peer wants headers announcements,
8819 // then find the first header not yet known to our peer but would
8820 // connect, and send. If no header would connect, or if we have too
8821 // many blocks, or if the peer doesn't want headers, just add all to
8822 // the inv queue.
8823 LOCK(peer->m_block_inv_mutex);
8824 std::vector<CBlock> vHeaders;
8825 bool fRevertToInv =
8826 ((!peer->m_prefers_headers &&
8827 (!state.m_requested_hb_cmpctblocks ||
8828 peer->m_blocks_for_headers_relay.size() > 1)) ||
8829 peer->m_blocks_for_headers_relay.size() >
8830 MAX_BLOCKS_TO_ANNOUNCE);
8831 // last header queued for delivery
8832 const CBlockIndex *pBestIndex = nullptr;
8833 // ensure pindexBestKnownBlock is up-to-date
8834 ProcessBlockAvailability(pto->GetId());
8835
8836 if (!fRevertToInv) {
8837 bool fFoundStartingHeader = false;
8838 // Try to find the first header that our peer doesn't have, and
8839 // then send all headers past that one. If we come across any
8840 // headers that aren't on m_chainman.ActiveChain(), give up.
8841 for (const BlockHash &hash : peer->m_blocks_for_headers_relay) {
8842 const CBlockIndex *pindex =
8843 m_chainman.m_blockman.LookupBlockIndex(hash);
8844 assert(pindex);
8845 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
8846 // Bail out if we reorged away from this block
8847 fRevertToInv = true;
8848 break;
8849 }
8850 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
8851 // This means that the blocks to announce don't
8852 // connect to each other. This shouldn't really be
8853 // possible to hit during regular operation (because
8854 // reorgs should take us to a chain that has some block
8855 // not on the prior chain, which should be caught by the
8856 // prior check), but one way this could happen is by
8857 // using invalidateblock / reconsiderblock repeatedly on
8858 // the tip, causing it to be added multiple times to
8859 // m_blocks_for_headers_relay. Robustly deal with this
8860 // rare situation by reverting to an inv.
8861 fRevertToInv = true;
8862 break;
8863 }
8864 pBestIndex = pindex;
8865 if (fFoundStartingHeader) {
8866 // add this to the headers message
8867 vHeaders.push_back(pindex->GetBlockHeader());
8868 } else if (PeerHasHeader(&state, pindex)) {
8869 // Keep looking for the first new block.
8870 continue;
8871 } else if (pindex->pprev == nullptr ||
8872 PeerHasHeader(&state, pindex->pprev)) {
8873 // Peer doesn't have this header but they do have the
8874 // prior one. Start sending headers.
8875 fFoundStartingHeader = true;
8876 vHeaders.push_back(pindex->GetBlockHeader());
8877 } else {
8878 // Peer doesn't have this header or the prior one --
8879 // nothing will connect, so bail out.
8880 fRevertToInv = true;
8881 break;
8882 }
8883 }
8884 }
8885 if (!fRevertToInv && !vHeaders.empty()) {
8886 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
8887 // We only send up to 1 block as header-and-ids, as
8888 // otherwise it probably means we're doing an
8889 // initial-ish sync or they're slow.
8890 LogPrint(BCLog::NET,
8891 "%s sending header-and-ids %s to peer=%d\n",
8892 __func__, vHeaders.front().GetHash().ToString(),
8893 pto->GetId());
8894
8895 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
8896 {
8897 LOCK(m_most_recent_block_mutex);
8898 if (m_most_recent_block_hash ==
8899 pBestIndex->GetBlockHash()) {
8900 cached_cmpctblock_msg =
8901 msgMaker.Make(NetMsgType::CMPCTBLOCK,
8902 *m_most_recent_compact_block);
8903 }
8904 }
8905 if (cached_cmpctblock_msg.has_value()) {
8906 m_connman.PushMessage(
8907 pto, std::move(cached_cmpctblock_msg.value()));
8908 } else {
8909 CBlock block;
8910 const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(
8911 block, *pBestIndex)};
8912 assert(ret);
8913 CBlockHeaderAndShortTxIDs cmpctblock(block);
8914 m_connman.PushMessage(
8915 pto,
8916 msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
8917 }
8918 state.pindexBestHeaderSent = pBestIndex;
8919 } else if (peer->m_prefers_headers) {
8920 if (vHeaders.size() > 1) {
8921 LogPrint(BCLog::NET,
8922 "%s: %u headers, range (%s, %s), to peer=%d\n",
8923 __func__, vHeaders.size(),
8924 vHeaders.front().GetHash().ToString(),
8925 vHeaders.back().GetHash().ToString(),
8926 pto->GetId());
8927 } else {
8928 LogPrint(BCLog::NET,
8929 "%s: sending header %s to peer=%d\n", __func__,
8930 vHeaders.front().GetHash().ToString(),
8931 pto->GetId());
8932 }
8933 m_connman.PushMessage(
8934 pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
8935 state.pindexBestHeaderSent = pBestIndex;
8936 } else {
8937 fRevertToInv = true;
8938 }
8939 }
8940 if (fRevertToInv) {
8941 // If falling back to using an inv, just try to inv the tip. The
8942 // last entry in m_blocks_for_headers_relay was our tip at some
8943 // point in the past.
8944 if (!peer->m_blocks_for_headers_relay.empty()) {
8945 const BlockHash &hashToAnnounce =
8946 peer->m_blocks_for_headers_relay.back();
8947 const CBlockIndex *pindex =
8948 m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
8949 assert(pindex);
8950
8951 // Warn if we're announcing a block that is not on the main
8952 // chain. This should be very rare and could be optimized
8953 // out. Just log for now.
8954 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
8955 LogPrint(
8956 BCLog::NET,
8957 "Announcing block %s not on main chain (tip=%s)\n",
8958 hashToAnnounce.ToString(),
8959 m_chainman.ActiveChain()
8960 .Tip()
8961 ->GetBlockHash()
8962 .ToString());
8963 }
8964
8965 // If the peer's chain has this block, don't inv it back.
8966 if (!PeerHasHeader(&state, pindex)) {
8967 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
8968 LogPrint(BCLog::NET,
8969 "%s: sending inv peer=%d hash=%s\n", __func__,
8970 pto->GetId(), hashToAnnounce.ToString());
8971 }
8972 }
8973 }
8974 peer->m_blocks_for_headers_relay.clear();
8975 }
8976 } // release cs_main
8977
8978 //
8979 // Message: inventory
8980 //
8981 std::vector<CInv> vInv;
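// Helper shared by the block, proof and tx announcement paths below: it
// batches inventory into vInv and flushes a full INV message whenever
// MAX_INV_SZ entries accumulate, keeping every message protocol-legal.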
8982 auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
8983 vInv.emplace_back(type, hash);
8984 if (vInv.size() == MAX_INV_SZ) {
8985 m_connman.PushMessage(
8986 pto, msgMaker.Make(NetMsgType::INV, std::move(vInv)));
8987 vInv.clear();
8988 }
8989 };
8990
8991 {
8992 LOCK(cs_main);
8993
8994 {
8995 LOCK(peer->m_block_inv_mutex);
8996
8997 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
8998 INVENTORY_BROADCAST_MAX_PER_MB *
8999 config.GetMaxBlockSize() /
9000 1000000));
9001
9002 // Add blocks
9003 for (const BlockHash &hash : peer->m_blocks_for_inv_relay) {
9004 addInvAndMaybeFlush(MSG_BLOCK, hash);
9005 }
9006 peer->m_blocks_for_inv_relay.clear();
9007 }
9008
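// Decide whether this peer's next inventory trickle is due. Inbound
// peers are put on a shared, randomized schedule (NextInvToInbounds) to
// make timing-based inference of the node's topology harder; outbound
// peers and peers with the NoBan permission get no artificial delay.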
9009 auto computeNextInvSendTime =
9010 [&](std::chrono::microseconds &next)
9011 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) -> bool {
9012 bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
9013
9014 if (next < current_time) {
9015 fSendTrickle = true;
9016 if (pto->IsInboundConn()) {
9017 next = NextInvToInbounds(
9018 current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
9019 } else {
9020 // Skip delay for outbound peers, as there is less privacy
9021 // concern for them.
9022 next = current_time;
9023 }
9024 }
9025
9026 return fSendTrickle;
9027 };
9028
9029 // Add proofs to inventory
9030 if (peer->m_proof_relay != nullptr) {
9031 LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
9032
9033 if (computeNextInvSendTime(
9034 peer->m_proof_relay->m_next_inv_send_time)) {
9035 auto it =
9036 peer->m_proof_relay->m_proof_inventory_to_send.begin();
9037 while (it !=
9038 peer->m_proof_relay->m_proof_inventory_to_send.end()) {
9039 const avalanche::ProofId proofid = *it;
9040
9041 it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
9042 it);
9043
9044 if (peer->m_proof_relay->m_proof_inventory_known_filter
9045 .contains(proofid)) {
9046 continue;
9047 }
9048
9049 peer->m_proof_relay->m_proof_inventory_known_filter.insert(
9050 proofid);
9051 addInvAndMaybeFlush(MSG_AVA_PROOF, proofid);
9052 peer->m_proof_relay->m_recently_announced_proofs.insert(
9053 proofid);
9054 }
9055 }
9056 }
9057
9058 if (auto tx_relay = peer->GetTxRelay()) {
9059 LOCK(tx_relay->m_tx_inventory_mutex);
9060 // Check whether periodic sends should happen
9061 const bool fSendTrickle =
9062 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
9063
9064 // Time to send but the peer has requested we not relay
9065 // transactions.
9066 if (fSendTrickle) {
9067 LOCK(tx_relay->m_bloom_filter_mutex);
9068 if (!tx_relay->m_relay_txs) {
9069 tx_relay->m_tx_inventory_to_send.clear();
9070 }
9071 }
9072
9073 // Respond to BIP35 mempool requests
9074 if (fSendTrickle && tx_relay->m_send_mempool) {
9075 auto vtxinfo = m_mempool.infoAll();
9076 tx_relay->m_send_mempool = false;
9077 const CFeeRate filterrate{
9078 tx_relay->m_fee_filter_received.load()};
9079
9080 LOCK(tx_relay->m_bloom_filter_mutex);
9081
9082 for (const auto &txinfo : vtxinfo) {
9083 const TxId &txid = txinfo.tx->GetId();
9084 tx_relay->m_tx_inventory_to_send.erase(txid);
9085 // Don't send transactions that peers will not put into
9086 // their mempool
9087 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
9088 continue;
9089 }
9090 if (tx_relay->m_bloom_filter &&
9091 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
9092 *txinfo.tx)) {
9093 continue;
9094 }
9095 tx_relay->m_tx_inventory_known_filter.insert(txid);
9096 // Responses to MEMPOOL requests bypass the
9097 // m_recently_announced_invs filter.
9098 addInvAndMaybeFlush(MSG_TX, txid);
9099 }
9100 tx_relay->m_last_mempool_req =
9101 std::chrono::duration_cast<std::chrono::seconds>(
9102 current_time);
9103 }
9104
9105 // Determine transactions to relay
9106 if (fSendTrickle) {
9107 // Produce a vector with all candidates for sending
9108 std::vector<std::set<TxId>::iterator> vInvTx;
9109 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
9110 for (std::set<TxId>::iterator it =
9111 tx_relay->m_tx_inventory_to_send.begin();
9112 it != tx_relay->m_tx_inventory_to_send.end(); it++) {
9113 vInvTx.push_back(it);
9114 }
9115 const CFeeRate filterrate{
9116 tx_relay->m_fee_filter_received.load()};
9117 // Send out the inventory in the order of admission to our
9118 // mempool, which is guaranteed to be a topological sort order.
9119 // A heap is used so that not all items need sorting if only a
9120 // few are being sent.
9121 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
9122 std::make_heap(vInvTx.begin(), vInvTx.end(),
9123 compareInvMempoolOrder);
9124 // No reason to drain out at many times the network's
9125 // capacity, especially since we have many peers and some
9126 // will draw much shorter delays.
9127 unsigned int nRelayedTransactions = 0;
9128 LOCK(tx_relay->m_bloom_filter_mutex);
9129 while (!vInvTx.empty() &&
9130 nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
9131 config.GetMaxBlockSize() /
9132 1000000) {
9133 // Fetch the top element from the heap
9134 std::pop_heap(vInvTx.begin(), vInvTx.end(),
9135 compareInvMempoolOrder);
9136 std::set<TxId>::iterator it = vInvTx.back();
9137 vInvTx.pop_back();
9138 const TxId txid = *it;
9139 // Remove it from the to-be-sent set
9140 tx_relay->m_tx_inventory_to_send.erase(it);
9141 // Check if not in the filter already
9142 if (tx_relay->m_tx_inventory_known_filter.contains(txid) &&
9143 tx_relay->m_avalanche_stalled_txids.count(txid) == 0) {
9144 continue;
9145 }
9146 // Not in the mempool anymore? don't bother sending it.
9147 auto txinfo = m_mempool.info(txid);
9148 if (!txinfo.tx) {
9149 continue;
9150 }
9151 // Peer told you to not send transactions at that
9152 // feerate? Don't bother sending it.
9153 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
9154 continue;
9155 }
9156 if (tx_relay->m_bloom_filter &&
9157 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
9158 *txinfo.tx)) {
9159 continue;
9160 }
9161 // Send
9162 tx_relay->m_recently_announced_invs.insert(txid);
9163 addInvAndMaybeFlush(MSG_TX, txid);
9164 nRelayedTransactions++;
9165 tx_relay->m_tx_inventory_known_filter.insert(txid);
9166 tx_relay->m_avalanche_stalled_txids.erase(txid);
9167 }
9168 }
9169 }
9170 } // release cs_main
9171
9172 if (!vInv.empty()) {
9173 m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
9174 }
9175
9176 {
9177 LOCK(cs_main);
9178
9179 CNodeState &state = *State(pto->GetId());
9180
9181 // Detect whether we're stalling
9182 auto stalling_timeout = m_block_stalling_timeout.load();
9183 if (state.m_stalling_since.count() &&
9184 state.m_stalling_since < current_time - stalling_timeout) {
9185 // Stalling only triggers when the block download window cannot
9186 // move. During normal steady state, the download window should be
9187 // much larger than the to-be-downloaded set of blocks, so
9188 // disconnection should only happen during initial block download.
9189 LogPrintf("Peer=%d is stalling block download, disconnecting\n",
9190 pto->GetId());
9191 pto->fDisconnect = true;
9192 // Increase timeout for the next peer so that we don't disconnect
9193 // multiple peers if our own bandwidth is insufficient.
9194 const auto new_timeout =
9195 std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
9196 if (stalling_timeout != new_timeout &&
9197 m_block_stalling_timeout.compare_exchange_strong(
9198 stalling_timeout, new_timeout)) {
9199 LogPrint(
9200 BCLog::NET,
9201 "Increased stalling timeout temporarily to %d seconds\n",
9202 count_seconds(new_timeout));
9203 }
9204 return true;
9205 }
9206 // In case there is a block that has been in flight from this peer for
9207 // block_interval * (1 + 0.5 * N) (with N the number of peers from which
9208 // we're downloading validated blocks), disconnect due to timeout.
9209 // We compensate for other peers to prevent killing off peers due to our
9210 // own downstream link being saturated. We only count validated
9211 // in-flight blocks so peers can't advertise non-existing block hashes
9212 // to unreasonably increase our timeout.
9213 if (state.vBlocksInFlight.size() > 0) {
9214 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
9215 int nOtherPeersWithValidatedDownloads =
9216 m_peers_downloading_from - 1;
9217 if (current_time >
9218 state.m_downloading_since +
9219 std::chrono::seconds{consensusParams.nPowTargetSpacing} *
9220 (BLOCK_DOWNLOAD_TIMEOUT_BASE +
9221 BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
9222 nOtherPeersWithValidatedDownloads)) {
9223 LogPrintf("Timeout downloading block %s from peer=%d, "
9224 "disconnecting\n",
9225 queuedBlock.pindex->GetBlockHash().ToString(),
9226 pto->GetId());
9227 pto->fDisconnect = true;
9228 return true;
9229 }
9230 }
9231
9232 // Check for headers sync timeouts
9233 if (state.fSyncStarted &&
9234 peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
9235 // Detect whether this is a stalling initial-headers-sync peer
9236 if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
9237 if (current_time > peer->m_headers_sync_timeout &&
9238 nSyncStarted == 1 &&
9239 (m_num_preferred_download_peers -
9240 state.fPreferredDownload >=
9241 1)) {
9242 // Disconnect a peer (without NetPermissionFlags::NoBan
9243 // permission) if it is our only sync peer, and we have
9244 // others we could be using instead. Note: If all our peers
9245 // are inbound, then we won't disconnect our sync peer for
9246 // stalling; we have bigger problems if we can't get any
9247 // outbound peers.
9248 if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
9249 LogPrintf("Timeout downloading headers from peer=%d, "
9250 "disconnecting\n",
9251 pto->GetId());
9252 pto->fDisconnect = true;
9253 return true;
9254 } else {
9255 LogPrintf("Timeout downloading headers from noban "
9256 "peer=%d, not disconnecting\n",
9257 pto->GetId());
9258 // Reset the headers sync state so that we have a chance
9259 // to try downloading from a different peer. Note: this
9260 // will also result in at least one more getheaders
9261 // message to be sent to this peer (eventually).
9262 state.fSyncStarted = false;
9263 nSyncStarted--;
9264 peer->m_headers_sync_timeout = 0us;
9265 }
9266 }
9267 } else {
9268 // After we've caught up once, reset the timeout so we can't
9269 // trigger disconnect later.
9270 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
9271 }
9272 }
9273
9274 // Check that outbound peers have reasonable chains. GetTime() is used
9275 // by this anti-DoS logic so we can test this using mocktime.
9276 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
9277 } // release cs_main
9278
9279 std::vector<CInv> vGetData;
9280
9281 //
9282 // Message: getdata (blocks)
9283 //
9284 {
9285 LOCK(cs_main);
9286
9287 CNodeState &state = *State(pto->GetId());
9288
9289 if (CanServeBlocks(*peer) &&
9290 ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
9291 !m_chainman.IsInitialBlockDownload()) &&
9292 state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
9293 std::vector<const CBlockIndex *> vToDownload;
9294 NodeId staller = -1;
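// get_inflight_budget computes the remaining capacity in this peer's
// block download window, floored at zero so a full window yields an
// empty budget rather than a negative one.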
9295 auto get_inflight_budget = [&state]() {
9296 return std::max(
9297 0, MAX_BLOCKS_IN_TRANSIT_PER_PEER -
9298 static_cast<int>(state.vBlocksInFlight.size()));
9299 };
9300
9301 // If a snapshot chainstate is in use, we want to find its next
9302 // blocks before the background chainstate to prioritize getting to
9303 // network tip.
9304 FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload,
9305 staller);
9306 if (m_chainman.BackgroundSyncInProgress() &&
9307 !IsLimitedPeer(*peer)) {
9308 // If the background tip is not an ancestor of the snapshot
9309 // block, we need to start requesting blocks from their last
9310 // common ancestor.
9311 const CBlockIndex *from_tip =
9312 LastCommonAncestor(m_chainman.GetBackgroundSyncTip(),
9313 m_chainman.GetSnapshotBaseBlock());
9314
9315 TryDownloadingHistoricalBlocks(
9316 *peer, get_inflight_budget(), vToDownload, from_tip,
9317 Assert(m_chainman.GetSnapshotBaseBlock()));
9318 }
9319 for (const CBlockIndex *pindex : vToDownload) {
9320 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
9321 BlockRequested(config, pto->GetId(), *pindex);
9322 LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
9323 pindex->GetBlockHash().ToString(), pindex->nHeight,
9324 pto->GetId());
9325 }
9326 if (state.vBlocksInFlight.empty() && staller != -1) {
9327 if (State(staller)->m_stalling_since == 0us) {
9328 State(staller)->m_stalling_since = current_time;
9329 LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
9330 }
9331 }
9332 }
9333 } // release cs_main
9334
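// Same batching pattern as addInvAndMaybeFlush above, but for GETDATA:
// requests accumulate in vGetData and are flushed once MAX_GETDATA_SZ
// entries are queued; any remainder is sent after the tx loop below.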
9335 auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
9336 CInv inv(type, hash);
9337 LogPrint(BCLog::NET, "Requesting %s from peer=%d\n", inv.ToString(),
9338 pto->GetId());
9339 vGetData.push_back(std::move(inv));
9340 if (vGetData.size() >= MAX_GETDATA_SZ) {
9341 m_connman.PushMessage(
9342 pto, msgMaker.Make(NetMsgType::GETDATA, std::move(vGetData)));
9343 vGetData.clear();
9344 }
9345 };
9346
9347 //
9348 // Message: getdata (proof)
9349 //
9350 if (m_avalanche) {
9351 LOCK(cs_proofrequest);
9352 std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
9353 auto requestable =
9354 m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
9355 for (const auto &entry : expired) {
9356 LogPrint(BCLog::AVALANCHE,
9357 "timeout of inflight proof %s from peer=%d\n",
9358 entry.second.ToString(), entry.first);
9359 }
9360 for (const auto &proofid : requestable) {
9361 if (!AlreadyHaveProof(proofid)) {
9362 addGetDataAndMaybeFlush(MSG_AVA_PROOF, proofid);
9363 m_proofrequest.RequestedData(
9364 pto->GetId(), proofid,
9365 current_time + PROOF_REQUEST_PARAMS.getdata_interval);
9366 } else {
9367 // We have already seen this proof, no need to download.
9368 // This is just a belt-and-suspenders, as this should
9369 // already be called whenever a proof becomes
9370 // AlreadyHaveProof().
9371 m_proofrequest.ForgetInvId(proofid);
9372 }
9373 }
9374 }
9375
9376 //
9377 // Message: getdata (transactions)
9378 //
9379 {
9380 LOCK(cs_main);
9381 std::vector<std::pair<NodeId, TxId>> expired;
9382 auto requestable =
9383 m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
9384 for (const auto &entry : expired) {
9385 LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
9386 entry.second.ToString(), entry.first);
9387 }
9388 for (const TxId &txid : requestable) {
9389 // Exclude m_recent_rejects_package_reconsiderable: we may be
9390 // requesting a missing parent that was previously rejected for
9391 // being too low feerate.
9392 if (!AlreadyHaveTx(txid, /*include_reconsiderable=*/false)) {
9393 addGetDataAndMaybeFlush(MSG_TX, txid);
9394 m_txrequest.RequestedData(
9395 pto->GetId(), txid,
9396 current_time + TX_REQUEST_PARAMS.getdata_interval);
9397 } else {
9398 // We have already seen this transaction, no need to download.
9399 // This is just a belt-and-suspenders, as this should already be
9400 // called whenever a transaction becomes AlreadyHaveTx().
9401 m_txrequest.ForgetInvId(txid);
9402 }
9403 }
9404
9405 if (!vGetData.empty()) {
9406 m_connman.PushMessage(pto,
9407 msgMaker.Make(NetMsgType::GETDATA, vGetData));
9408 }
9409
9410 } // release cs_main
9411 MaybeSendFeefilter(*pto, *peer, current_time);
9412 return true;
9413}
9414
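The addGetDataAndMaybeFlush() helper above keeps every GETDATA message below
MAX_GETDATA_SZ entries: a full batch is pushed immediately and the buffer is
reset, and any partial batch left over is flushed once at the end of the
transaction section. The following is a minimal self-contained sketch of that
batch-and-flush pattern; Inv, pushGetData and kMaxGetDataSize are simplified
stand-ins for illustration, not the real net_processing types.

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct Inv {
    int type;
    std::string hash;
};

// Stand-in for the protocol's per-message entry limit (MAX_GETDATA_SZ).
constexpr size_t kMaxGetDataSize = 1000;

// Stand-in for CConnman::PushMessage(): just report the batch size.
static void pushGetData(std::vector<Inv> &&batch) {
    std::printf("getdata with %zu entries\n", batch.size());
}

int main() {
    std::vector<Inv> getdata;

    auto addAndMaybeFlush = [&](int type, const std::string &hash) {
        getdata.push_back(Inv{type, hash});
        // Flush a full batch immediately so no single message ever
        // exceeds the limit.
        if (getdata.size() >= kMaxGetDataSize) {
            pushGetData(std::move(getdata));
            getdata.clear(); // reset the moved-from vector for reuse
        }
    };

    for (int i = 0; i < 2500; ++i) {
        addAndMaybeFlush(/*type=*/1, "hash" + std::to_string(i));
    }

    // Final flush for the partial batch, mirroring the PushMessage call at
    // the end of the transaction getdata section above.
    if (!getdata.empty()) {
        pushGetData(std::move(getdata));
    }
    return 0;
}

With 2500 announcements and a limit of 1000 this prints two full batches and a
final batch of 500, which is exactly the behavior the flush-on-threshold plus
final-flush combination is meant to guarantee.
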
9415bool PeerManagerImpl::ReceivedAvalancheProof(CNode &node, Peer &peer,
9416 const avalanche::ProofRef &proof) {
9417 assert(proof != nullptr);
9418
9419 const avalanche::ProofId &proofid = proof->getId();
9420
9421 AddKnownProof(peer, proofid);
9422
9423 if (m_chainman.IsInitialBlockDownload()) {
9424         // We cannot reliably verify proofs during IBD, so bail out early and
9425         // keep the inventory pending so that it can be requested once the
9426         // node has synced.
9427 return true;
9428 }
9429
9430 const NodeId nodeid = node.GetId();
9431
9432 const bool isStaker = WITH_LOCK(node.cs_avalanche_pubkey,
9433 return node.m_avalanche_pubkey.has_value());
9434 auto saveProofIfStaker = [this, isStaker](const CNode &node,
9435 const avalanche::ProofId &proofid,
9436 const NodeId nodeid) -> bool {
9437 if (isStaker) {
9438 return m_avalanche->withPeerManager(
9439 [&](avalanche::PeerManager &pm) {
9440 return pm.saveRemoteProof(proofid, nodeid, true);
9441 });
9442 }
9443
9444 return false;
9445 };
9446
9447 {
9448 LOCK(cs_proofrequest);
9449 m_proofrequest.ReceivedResponse(nodeid, proofid);
9450
9451 if (AlreadyHaveProof(proofid)) {
9452 m_proofrequest.ForgetInvId(proofid);
9453 saveProofIfStaker(node, proofid, nodeid);
9454 return true;
9455 }
9456 }
9457
9458     // registerProof should not be called while cs_proofrequest is held, as
9459     // it locks cs_main and that creates a potential deadlock during shutdown
9460
9461     avalanche::ProofRegistrationState state;
9462     if (m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
9463 return pm.registerProof(proof, state);
9464 })) {
9465 WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
9466 RelayProof(proofid);
9467
9468 node.m_last_proof_time = GetTime<std::chrono::seconds>();
9469
9470 LogPrint(BCLog::NET, "New avalanche proof: peer=%d, proofid %s\n",
9471 nodeid, proofid.ToString());
9472     }
9473
9474     if (state.GetResult() == avalanche::ProofRegistrationResult::INVALID) {
9475         m_avalanche->withPeerManager(
9475 m_avalanche->withPeerManager(
9476 [&](avalanche::PeerManager &pm) { pm.setInvalid(proofid); });
9477 Misbehaving(peer, state.GetRejectReason());
9478 return false;
9479 }
9480
9481     if (state.GetResult() == avalanche::ProofRegistrationResult::MISSING_UTXO) {
9482         // It is possible that a proof contains a utxo we don't know about
9483         // yet, so don't ban for this.
9484 return false;
9485 }
9486
9487     // Unlike other rejection reasons, many peers may send a proof that we
9488     // have dangling. In that case don't print a lot of useless debug
9489     // messages; the proof will be polled as soon as it's considered again.
9490     if (!m_avalanche->reconcileOrFinalize(proof) &&
9491         state.GetResult() != avalanche::ProofRegistrationResult::DANGLING) {
9492         LogPrint(BCLog::AVALANCHE,
9493                  "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
9494 state.IsValid() ? "not-worth-polling"
9495 : state.GetRejectReason(),
9496 nodeid, proofid.ToString());
9497 }
9498
9499 saveProofIfStaker(node, proofid, nodeid);
9500 return true;
9501}
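
To summarize the registration handling above: a successfully registered proof
is forgotten by the request tracker and relayed; an invalid proof is remembered
as invalid and the sender is penalized; a proof spending a utxo we don't know
about is dropped without penalty; and a dangling proof is deliberately not
logged, since many peers may legitimately announce it. The sketch below
restates that decision flow with the avalanche machinery reduced to a plain
enum; RegistrationResult and handleProofRegistration() are illustrative
inventions, not the actual Bitcoin ABC API.

#include <cstdio>

enum class RegistrationResult { Registered, Invalid, MissingUtxo, Dangling };

// Returns false only when processing must abort, mirroring the early
// `return false` paths in ReceivedAvalancheProof() above.
static bool handleProofRegistration(RegistrationResult result) {
    switch (result) {
    case RegistrationResult::Registered:
        std::puts("accepted: forget the pending request, relay the proof");
        return true;
    case RegistrationResult::Invalid:
        // The real code calls setInvalid() and Misbehaving() here.
        std::puts("invalid: cache the rejection and penalize the sender");
        return false;
    case RegistrationResult::MissingUtxo:
        // The proof may spend a utxo we simply have not seen yet, so the
        // sender is not punished.
        std::puts("missing utxo: drop without penalty");
        return false;
    case RegistrationResult::Dangling:
        // Many peers can legitimately send a dangling proof; it will be
        // polled once it is considered again, so avoid log spam.
        std::puts("dangling: keep quiet, poll later");
        return true;
    }
    return true; // unreachable for well-formed enum values
}

int main() {
    handleProofRegistration(RegistrationResult::Registered);
    handleProofRegistration(RegistrationResult::MissingUtxo);
    return 0;
}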