Bitcoin ABC 0.30.7
peermanager.cpp
1// Copyright (c) 2020 The Bitcoin developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#include <avalanche/peermanager.h>
6
11#include <cashaddrenc.h>
12#include <common/args.h>
14#include <logging.h>
15#include <random.h>
16#include <scheduler.h>
17#include <threadsafety.h>
18#include <uint256.h>
19#include <util/fastrange.h>
20#include <util/fs_helpers.h>
21#include <util/time.h>
22#include <validation.h> // For ChainstateManager
23
24#include <algorithm>
25#include <cassert>
26#include <limits>
27
28namespace avalanche {
29static constexpr uint64_t PEERS_DUMP_VERSION{1};
30
31bool PeerManager::addNode(NodeId nodeid, const ProofId &proofid) {
32 auto &pview = peers.get<by_proofid>();
33 auto it = pview.find(proofid);
34 if (it == pview.end()) {
35 // If the node exists, it is actually updating its proof to an unknown
36 // one. In this case we need to remove it so it is not both active and
37 // pending at the same time.
38 removeNode(nodeid);
39 pendingNodes.emplace(proofid, nodeid);
40 return false;
41 }
42
43 return addOrUpdateNode(peers.project<0>(it), nodeid);
44}
45
46bool PeerManager::addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid) {
47 assert(it != peers.end());
48
49 const PeerId peerid = it->peerid;
50
51 auto nit = nodes.find(nodeid);
52 if (nit == nodes.end()) {
53 if (!nodes.emplace(nodeid, peerid).second) {
54 return false;
55 }
56 } else {
57 const PeerId oldpeerid = nit->peerid;
58 if (!nodes.modify(nit, [&](Node &n) { n.peerid = peerid; })) {
59 return false;
60 }
61
62 // We actually have this node already, we need to update it.
63 bool success = removeNodeFromPeer(peers.find(oldpeerid));
64 assert(success);
65 }
66
67 // Then increase the node counter, and create the slot if needed
68 bool success = addNodeToPeer(it);
69 assert(success);
70
71 // If the added node was in the pending set, remove it
72 pendingNodes.get<by_nodeid>().erase(nodeid);
73
74 // If the proof was in the dangling pool, remove it
75 const ProofId &proofid = it->getProofId();
76 if (danglingProofPool.getProof(proofid)) {
77        danglingProofPool.removeProof(proofid);
78    }
79
80 // We know for sure there is at least 1 node. Note that this can fail if
81    // there is more than 1; in that case it's a no-op.
82 shareableProofs.insert(it->proof);
83
84 return true;
85}
86
87bool PeerManager::addNodeToPeer(const PeerSet::iterator &it) {
88 assert(it != peers.end());
89 return peers.modify(it, [&](Peer &p) {
90 if (p.node_count++ > 0) {
91 // We are done.
92 return;
93 }
94
95 // We need to allocate this peer.
96 p.index = uint32_t(slots.size());
97 const uint32_t score = p.getScore();
98 const uint64_t start = slotCount;
99 slots.emplace_back(start, score, it->peerid);
100 slotCount = start + score;
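        // e.g. with two peers of score 100 and 50 already allocated, the
        // slots cover [0, 100) and [100, 150) and slotCount is 150; a newly
        // allocated peer with score 30 takes [150, 180) and slotCount
        // becomes 180.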
101
102 // Add to our allocated score when we allocate a new peer in the slots
103 connectedPeersScore += score;
104 });
105}
106
107bool PeerManager::removeNode(NodeId nodeid) {
108    // Remove all the remote proofs from this node
109 auto &remoteProofsView = remoteProofs.get<by_nodeid>();
110 auto [begin, end] = remoteProofsView.equal_range(nodeid);
111 remoteProofsView.erase(begin, end);
112
113 if (pendingNodes.get<by_nodeid>().erase(nodeid) > 0) {
114 // If this was a pending node, there is nothing else to do.
115 return true;
116 }
117
118 auto it = nodes.find(nodeid);
119 if (it == nodes.end()) {
120 return false;
121 }
122
123 const PeerId peerid = it->peerid;
124 nodes.erase(it);
125
126    // Keep track of the reference count.
127 bool success = removeNodeFromPeer(peers.find(peerid));
128 assert(success);
129
130 return true;
131}
132
133bool PeerManager::removeNodeFromPeer(const PeerSet::iterator &it,
134 uint32_t count) {
135 // It is possible for nodes to be dangling. If there was an inflight query
136 // when the peer gets removed, the node was not erased. In this case there
137 // is nothing to do.
138 if (it == peers.end()) {
139 return true;
140 }
141
142 assert(count <= it->node_count);
143 if (count == 0) {
144 // This is a NOOP.
145 return false;
146 }
147
148 const uint32_t new_count = it->node_count - count;
149 if (!peers.modify(it, [&](Peer &p) { p.node_count = new_count; })) {
150 return false;
151 }
152
153 if (new_count > 0) {
154 // We are done.
155 return true;
156 }
157
158 // There are no more nodes left, we need to clean up. Remove from the radix
159 // tree (unless it's our local proof), subtract allocated score and remove
160 // from slots.
161 if (!localProof || it->getProofId() != localProof->getId()) {
162 const auto removed = shareableProofs.remove(it->getProofId());
163 assert(removed);
164 }
165
166 const size_t i = it->index;
167 assert(i < slots.size());
168 assert(connectedPeersScore >= slots[i].getScore());
169 connectedPeersScore -= slots[i].getScore();
170
171 if (i + 1 == slots.size()) {
172 slots.pop_back();
173 slotCount = slots.empty() ? 0 : slots.back().getStop();
174 } else {
175 fragmentation += slots[i].getScore();
176 slots[i] = slots[i].withPeerId(NO_PEER);
177 }
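    // The freed slot is reclaimed lazily: the fragmentation accounted above
    // is only recovered when compact() rebuilds the slot vector, typically
    // after peer selection starts failing.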
178
179 return true;
180}
181
182bool PeerManager::updateNextRequestTime(NodeId nodeid,
183                                        SteadyMilliseconds timeout) {
184 auto it = nodes.find(nodeid);
185 if (it == nodes.end()) {
186 return false;
187 }
188
189 return nodes.modify(it, [&](Node &n) { n.nextRequestTime = timeout; });
190}
191
192bool PeerManager::latchAvaproofsSent(NodeId nodeid) {
193    auto it = nodes.find(nodeid);
194 if (it == nodes.end()) {
195 return false;
196 }
197
198 return !it->avaproofsSent &&
199 nodes.modify(it, [&](Node &n) { n.avaproofsSent = true; });
200}
201
202static bool isImmatureState(const ProofValidationState &state) {
203    return state.GetResult() == ProofValidationResult::IMMATURE_UTXO;
204}
205
206bool PeerManager::updateNextPossibleConflictTime(
207    PeerId peerid, const std::chrono::seconds &nextTime) {
208 auto it = peers.find(peerid);
209 if (it == peers.end()) {
210 // No such peer
211 return false;
212 }
213
214 // Make sure we don't move the time in the past.
215 peers.modify(it, [&](Peer &p) {
216        p.nextPossibleConflictTime =
217            std::max(p.nextPossibleConflictTime, nextTime);
218 });
219
220 return it->nextPossibleConflictTime == nextTime;
221}
222
223bool PeerManager::setFinalized(PeerId peerid) {
224    auto it = peers.find(peerid);
225 if (it == peers.end()) {
226 // No such peer
227 return false;
228 }
229
230 peers.modify(it, [&](Peer &p) { p.hasFinalized = true; });
231
232 return true;
233}
234
235template <typename ProofContainer>
236void PeerManager::moveToConflictingPool(const ProofContainer &proofs) {
237 auto &peersView = peers.get<by_proofid>();
238 for (const ProofRef &proof : proofs) {
239 auto it = peersView.find(proof->getId());
240 if (it != peersView.end()) {
241 removePeer(it->peerid);
242 }
243
244        conflictingProofPool.addProofIfPreferred(proof);
245    }
246}
247
248bool PeerManager::registerProof(const ProofRef &proof,
249                                ProofRegistrationState &registrationState,
250 RegistrationMode mode) {
251 assert(proof);
252
253 const ProofId &proofid = proof->getId();
254
255 auto invalidate = [&](ProofRegistrationResult result,
256 const std::string &message) {
257 return registrationState.Invalid(
258 result, message, strprintf("proofid: %s", proofid.ToString()));
259 };
260
261 if ((mode != RegistrationMode::FORCE_ACCEPT ||
262 !isInConflictingPool(proofid)) &&
263 exists(proofid)) {
264 // In default mode, we expect the proof to be unknown, i.e. in none of
265 // the pools.
266 // In forced accept mode, the proof can be in the conflicting pool.
267        return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,
268                          "proof-already-registered");
269 }
270
271 if (danglingProofPool.getProof(proofid) &&
272 pendingNodes.count(proofid) == 0) {
273 // Don't attempt to register a proof that we already evicted because it
274 // was dangling, but rather attempt to retrieve an associated node.
275 needMoreNodes = true;
276 return invalidate(ProofRegistrationResult::DANGLING, "dangling-proof");
277 }
278
279 // Check the proof's validity.
280 ProofValidationState validationState;
281 if (!WITH_LOCK(cs_main, return proof->verify(stakeUtxoDustThreshold,
282 chainman, validationState))) {
283 if (isImmatureState(validationState)) {
284            immatureProofPool.addProofIfPreferred(proof);
285            if (immatureProofPool.countProofs() >
286                AVALANCHE_MAX_IMMATURE_PROOFS) {
287                // Adding this proof exceeds the immature pool limit, so evict
288                // the lowest scoring proof.
289                immatureProofPool.removeProof(
290                    immatureProofPool.getLowestScoreProof()->getId());
291            }
292
293 return invalidate(ProofRegistrationResult::IMMATURE,
294 "immature-proof");
295 }
296
297 if (validationState.GetResult() ==
298            ProofValidationResult::MISSING_UTXO) {
299            return invalidate(ProofRegistrationResult::MISSING_UTXO,
300                              "utxo-missing-or-spent");
301 }
302
303 // Reject invalid proof.
304 return invalidate(ProofRegistrationResult::INVALID, "invalid-proof");
305 }
306
307 auto now = GetTime<std::chrono::seconds>();
308 auto nextCooldownTimePoint =
309 now + std::chrono::seconds(gArgs.GetIntArg(
310 "-avalancheconflictingproofcooldown",
311                  AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN));
312
313 ProofPool::ConflictingProofSet conflictingProofs;
314 switch (validProofPool.addProofIfNoConflict(proof, conflictingProofs)) {
315 case ProofPool::AddProofStatus::REJECTED: {
316 if (mode != RegistrationMode::FORCE_ACCEPT) {
317 auto bestPossibleConflictTime = std::chrono::seconds(0);
318 auto &pview = peers.get<by_proofid>();
319 for (auto &conflictingProof : conflictingProofs) {
320 auto it = pview.find(conflictingProof->getId());
321 assert(it != pview.end());
322
323 // Search the most recent time over the peers
324 bestPossibleConflictTime = std::max(
325 bestPossibleConflictTime, it->nextPossibleConflictTime);
326
327                    updateNextPossibleConflictTime(it->peerid,
328                                                   nextCooldownTimePoint);
329 }
330
331 if (bestPossibleConflictTime > now) {
332 // Cooldown not elapsed, reject the proof.
333 return invalidate(
334                        ProofRegistrationResult::COOLDOWN_NOT_ELAPSED,
335                        "cooldown-not-elapsed");
336 }
337
338                // Give the proof a chance to replace the conflicting ones.
339                if (validProofPool.addProofIfPreferred(proof)) {
340 // If we have overridden other proofs due to conflict,
341 // remove the peers and attempt to move them to the
342 // conflicting pool.
343 moveToConflictingPool(conflictingProofs);
344
345 // Replacement is successful, continue to peer creation
346 break;
347 }
348
349 // Not the preferred proof, or replacement is not enabled
350                return conflictingProofPool.addProofIfPreferred(proof) ==
351                               ProofPool::AddProofStatus::REJECTED
352                           ? invalidate(ProofRegistrationResult::REJECTED,
353                                        "rejected-proof")
354                           : invalidate(ProofRegistrationResult::CONFLICTING,
355                                        "conflicting-utxos");
356 }
357
358            conflictingProofPool.removeProof(proofid);
359
360 // Move the conflicting proofs from the valid pool to the
361 // conflicting pool
362 moveToConflictingPool(conflictingProofs);
363
364 auto status = validProofPool.addProofIfNoConflict(proof);
365 assert(status == ProofPool::AddProofStatus::SUCCEED);
366
367 break;
368 }
369 case ProofPool::AddProofStatus::DUPLICATED:
370 // If the proof was already in the pool, don't duplicate the peer.
371            return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,
372                              "proof-already-registered");
373 case ProofPool::AddProofStatus::SUCCEED:
374 break;
375
376 // No default case, so the compiler can warn about missing cases
377 }
378
379 // At this stage we are going to create a peer so the proof should never
380 // exist in the conflicting pool, but use belt and suspenders.
381    conflictingProofPool.removeProof(proofid);
382
383 // New peer means new peerid!
384 const PeerId peerid = nextPeerId++;
385
386 // We have no peer for this proof, time to create it.
387 auto inserted = peers.emplace(peerid, proof, nextCooldownTimePoint);
388 assert(inserted.second);
389
390 if (localProof && proof->getId() == localProof->getId()) {
391 // Add it to the shareable proofs even if there is no node, we are the
392 // node. Otherwise it will be inserted after a node is attached to the
393 // proof.
394 shareableProofs.insert(proof);
395 }
396
397 // Add to our registered score when adding to the peer list
398 totalPeersScore += proof->getScore();
399
400 // If there are nodes waiting for this proof, add them
401 auto &pendingNodesView = pendingNodes.get<by_proofid>();
402 auto range = pendingNodesView.equal_range(proofid);
403
404 // We want to update the nodes then remove them from the pending set. That
405 // will invalidate the range iterators, so we need to save the node ids
406 // first before we can loop over them.
407 std::vector<NodeId> nodeids;
408 nodeids.reserve(std::distance(range.first, range.second));
409 std::transform(range.first, range.second, std::back_inserter(nodeids),
410 [](const PendingNode &n) { return n.nodeid; });
411
412 for (const NodeId &nodeid : nodeids) {
413 addOrUpdateNode(inserted.first, nodeid);
414 }
415
416 return true;
417}
418
419bool PeerManager::rejectProof(const ProofId &proofid, RejectionMode mode) {
420    if (isDangling(proofid) && mode == RejectionMode::INVALIDATE) {
421        danglingProofPool.removeProof(proofid);
422        return true;
423 }
424
425 if (!exists(proofid)) {
426 return false;
427 }
428
429 if (immatureProofPool.removeProof(proofid)) {
430 return true;
431 }
432
433 if (mode == RejectionMode::DEFAULT &&
434        conflictingProofPool.getProof(proofid)) {
435        // In default mode we keep the proof in the conflicting pool
436 return true;
437 }
438
439 if (mode == RejectionMode::INVALIDATE &&
440        conflictingProofPool.removeProof(proofid)) {
441        // In invalidate mode we remove the proof completely
442 return true;
443 }
444
445 auto &pview = peers.get<by_proofid>();
446 auto it = pview.find(proofid);
447 assert(it != pview.end());
448
449 const ProofRef proof = it->proof;
450
451 if (!removePeer(it->peerid)) {
452 return false;
453 }
454
455    // If there were conflicting proofs, attempt to pull them back
456 for (const SignedStake &ss : proof->getStakes()) {
457 const ProofRef conflictingProof =
458 conflictingProofPool.getProof(ss.getStake().getUTXO());
459 if (!conflictingProof) {
460 continue;
461 }
462
463 conflictingProofPool.removeProof(conflictingProof->getId());
464 registerProof(conflictingProof);
465 }
466
467 if (mode == RejectionMode::DEFAULT) {
468        conflictingProofPool.addProofIfPreferred(proof);
469    }
470
471 return true;
472}
473
474void PeerManager::cleanupDanglingProofs(
475    std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
476 registeredProofs.clear();
477 const auto now = GetTime<std::chrono::seconds>();
478
479 std::vector<ProofRef> newlyDanglingProofs;
480 for (const Peer &peer : peers) {
481 // If the peer is not our local proof, has been registered for some
482 // time and has no node attached, discard it.
483 if ((!localProof || peer.getProofId() != localProof->getId()) &&
484 peer.node_count == 0 &&
485 (peer.registration_time + Peer::DANGLING_TIMEOUT) <= now) {
486 // Check the remotes status to determine if we should set the proof
487            // as dangling. This prevents us from dropping a proof on our own
488 // to a network issue. If the remote presence status is inconclusive
489 // we assume our own position (missing = false).
490 if (!getRemotePresenceStatus(peer.getProofId()).value_or(false)) {
491 newlyDanglingProofs.push_back(peer.proof);
492 }
493 }
494 }
495
496 // Similarly, check if we have dangling proofs that could be pulled back
497 // because the network says so.
498 std::vector<ProofRef> previouslyDanglingProofs;
499 danglingProofPool.forEachProof([&](const ProofRef &proof) {
500 if (getRemotePresenceStatus(proof->getId()).value_or(false)) {
501 previouslyDanglingProofs.push_back(proof);
502 }
503 });
504 for (const ProofRef &proof : previouslyDanglingProofs) {
505 danglingProofPool.removeProof(proof->getId());
506 if (registerProof(proof)) {
507 registeredProofs.insert(proof);
508 }
509 }
510
511 for (const ProofRef &proof : newlyDanglingProofs) {
512 rejectProof(proof->getId(), RejectionMode::INVALIDATE);
513        if (danglingProofPool.addProofIfPreferred(proof)) {
514            // If the proof is added, it means there is no better conflicting
515            // dangling proof and this is not a duplicate, so it's worth
516 // printing a message to the log.
517            LogPrint(BCLog::AVALANCHE,
518                     "Proof dangling for too long (no connected node): %s\n",
519 proof->getId().GetHex());
520 }
521 }
522
523    // If we have dangling proofs, this is a good indicator that we need to
524 // request more nodes from our peers.
525 needMoreNodes = !newlyDanglingProofs.empty();
526}
527
528NodeId PeerManager::selectNode() {
529    for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) {
530 const PeerId p = selectPeer();
531
532        // If we cannot find a peer, it may be due to high fragmentation of
533        // the slots, so compact and retry.
534 if (p == NO_PEER) {
535 compact();
536 continue;
537 }
538
539 // See if that peer has an available node.
540 auto &nview = nodes.get<next_request_time>();
541 auto it = nview.lower_bound(boost::make_tuple(p, SteadyMilliseconds()));
542 if (it != nview.end() && it->peerid == p &&
543 it->nextRequestTime <= Now<SteadyMilliseconds>()) {
544 return it->nodeid;
545 }
546 }
547
548 // We failed to find a node to query, flag this so we can request more
549 needMoreNodes = true;
550
551 return NO_NODE;
552}
553
554std::unordered_set<ProofRef, SaltedProofHasher> PeerManager::updatedBlockTip() {
555 std::vector<ProofId> invalidProofIds;
556 std::vector<ProofRef> newImmatures;
557
558 {
559 LOCK(cs_main);
560
561 for (const auto &p : peers) {
562            ProofValidationState state;
563            if (!p.proof->verify(stakeUtxoDustThreshold, chainman, state)) {
564 if (isImmatureState(state)) {
565 newImmatures.push_back(p.proof);
566 }
567 invalidProofIds.push_back(p.getProofId());
568
569                LogPrint(BCLog::AVALANCHE,
570                         "Invalidating proof %s: verification failed (%s)\n",
571 p.proof->getId().GetHex(), state.ToString());
572 }
573 }
574
575 // Disable thread safety analysis here because it does not play nicely
576 // with the lambda
577        danglingProofPool.forEachProof(
578            [&](const ProofRef &proof) NO_THREAD_SAFETY_ANALYSIS {
580                ProofValidationState state;
581                if (!proof->verify(stakeUtxoDustThreshold, chainman, state)) {
582 invalidProofIds.push_back(proof->getId());
583
584 LogPrint(
585                        BCLog::AVALANCHE,
586                        "Invalidating dangling proof %s: verification failed "
587 "(%s)\n",
588 proof->getId().GetHex(), state.ToString());
589 }
590 });
591 }
592
593 // Remove the invalid proofs before the immature rescan. This makes it
594 // possible to pull back proofs with utxos that conflicted with these
595 // invalid proofs.
596 for (const ProofId &invalidProofId : invalidProofIds) {
597 rejectProof(invalidProofId, RejectionMode::INVALIDATE);
598 }
599
600 auto registeredProofs = immatureProofPool.rescan(*this);
601
602 for (auto &p : newImmatures) {
603        immatureProofPool.addProofIfPreferred(p);
604    }
605
606 return registeredProofs;
607}
608
609ProofRef PeerManager::getProof(const ProofId &proofid) const {
610    ProofRef proof;
611
612 forPeer(proofid, [&](const Peer &p) {
613 proof = p.proof;
614 return true;
615 });
616
617 if (!proof) {
618 proof = conflictingProofPool.getProof(proofid);
619 }
620
621 if (!proof) {
622 proof = immatureProofPool.getProof(proofid);
623 }
624
625 return proof;
626}
627
628bool PeerManager::isBoundToPeer(const ProofId &proofid) const {
629 auto &pview = peers.get<by_proofid>();
630 return pview.find(proofid) != pview.end();
631}
632
633bool PeerManager::isImmature(const ProofId &proofid) const {
634 return immatureProofPool.getProof(proofid) != nullptr;
635}
636
637bool PeerManager::isInConflictingPool(const ProofId &proofid) const {
638 return conflictingProofPool.getProof(proofid) != nullptr;
639}
640
641bool PeerManager::isDangling(const ProofId &proofid) const {
642 return danglingProofPool.getProof(proofid) != nullptr;
643}
644
645void PeerManager::setInvalid(const ProofId &proofid) {
646 invalidProofs.insert(proofid);
647}
648
649bool PeerManager::isInvalid(const ProofId &proofid) const {
650 return invalidProofs.contains(proofid);
651}
652
655}
656
657bool PeerManager::saveRemoteProof(const ProofId &proofid, const NodeId nodeid,
658 const bool present) {
659 // Get how many proofs this node has announced
660 auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
661 auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
662
663 // Limit the number of proofs a single node can save:
664 // - At least MAX_REMOTE_PROOFS
665    // - Up to 2x the number of peers we have
666 // The MAX_REMOTE_PROOFS minimum is there to ensure we don't overlimit at
667 // startup when we don't have proofs yet.
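    // For instance, if MAX_REMOTE_PROOFS were 100, a node could save up to
    // 100 remote proofs while we track 50 peers or fewer, and up to
    // 2 * peers.size() proofs once we track more than 50 peers.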
668 while (size_t(std::distance(begin, end)) >=
669 std::max(MAX_REMOTE_PROOFS, 2 * peers.size())) {
670 // Remove the proof with the oldest update time
671 begin = remoteProofsByLastUpdate.erase(begin);
672 }
673
674 auto it = remoteProofs.find(boost::make_tuple(proofid, nodeid));
675 if (it != remoteProofs.end()) {
676 remoteProofs.erase(it);
677 }
678
679 return remoteProofs
680 .emplace(RemoteProof{proofid, nodeid, GetTime<std::chrono::seconds>(),
681 present})
682 .second;
683}
684
685std::vector<RemoteProof>
686PeerManager::getRemoteProofs(const NodeId nodeid) const {
687    std::vector<RemoteProof> nodeRemoteProofs;
688
689 auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
690 auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
691
692 for (auto &it = begin; it != end; it++) {
693 nodeRemoteProofs.emplace_back(*it);
694 }
695
696 return nodeRemoteProofs;
697}
698
699bool PeerManager::isRemoteProof(const ProofId &proofid) const {
700 auto &view = remoteProofs.get<by_proofid>();
701 return view.count(proofid) > 0;
702}
703
704bool PeerManager::removePeer(const PeerId peerid) {
705 auto it = peers.find(peerid);
706 if (it == peers.end()) {
707 return false;
708 }
709
710 // Remove all nodes from this peer.
711 removeNodeFromPeer(it, it->node_count);
712
713 auto &nview = nodes.get<next_request_time>();
714
715 // Add the nodes to the pending set
716 auto range = nview.equal_range(peerid);
717 for (auto &nit = range.first; nit != range.second; ++nit) {
718 pendingNodes.emplace(it->getProofId(), nit->nodeid);
719 };
720
721 // Remove nodes associated with this peer, unless their timeout is still
722    // active. This ensures that we don't overquery them in case they are
723 // subsequently added to another peer.
724 nview.erase(
725 nview.lower_bound(boost::make_tuple(peerid, SteadyMilliseconds())),
726 nview.upper_bound(
727 boost::make_tuple(peerid, Now<SteadyMilliseconds>())));
728
729 // Release UTXOs attached to this proof.
730 validProofPool.removeProof(it->getProofId());
731
732 // If there were nodes attached, remove from the radix tree as well
733 auto removed = shareableProofs.remove(Uint256RadixKey(it->getProofId()));
734
735 m_unbroadcast_proofids.erase(it->getProofId());
736
737 // Remove the peer from the PeerSet and remove its score from the registered
738 // score total.
739 assert(totalPeersScore >= it->getScore());
740 totalPeersScore -= it->getScore();
741 peers.erase(it);
742 return true;
743}
744
745PeerId PeerManager::selectPeer() const {
746    if (slots.empty() || slotCount == 0) {
747 return NO_PEER;
748 }
749
750 const uint64_t max = slotCount;
751 for (int retry = 0; retry < SELECT_PEER_MAX_RETRY; retry++) {
752 size_t i = selectPeerImpl(slots, GetRand(max), max);
753 if (i != NO_PEER) {
754 return i;
755 }
756 }
757
758 return NO_PEER;
759}
760
761uint64_t PeerManager::compact() {
762    // There is nothing to compact.
763 if (fragmentation == 0) {
764 return 0;
765 }
766
767 std::vector<Slot> newslots;
768 newslots.reserve(peers.size());
769
770 uint64_t prevStop = 0;
771 uint32_t i = 0;
772 for (auto it = peers.begin(); it != peers.end(); it++) {
773 if (it->node_count == 0) {
774 continue;
775 }
776
777 newslots.emplace_back(prevStop, it->getScore(), it->peerid);
778 prevStop = slots[i].getStop();
779 if (!peers.modify(it, [&](Peer &p) { p.index = i++; })) {
780 return 0;
781 }
782 }
783
784 slots = std::move(newslots);
785
786 const uint64_t saved = slotCount - prevStop;
787 slotCount = prevStop;
788 fragmentation = 0;
789
790 return saved;
791}
792
793bool PeerManager::verify() const {
794    uint64_t prevStop = 0;
795 uint32_t scoreFromSlots = 0;
796 for (size_t i = 0; i < slots.size(); i++) {
797 const Slot &s = slots[i];
798
799 // Slots must be in correct order.
800 if (s.getStart() < prevStop) {
801 return false;
802 }
803
804 prevStop = s.getStop();
805
806 // If this is a dead slot, then nothing more needs to be checked.
807 if (s.getPeerId() == NO_PEER) {
808 continue;
809 }
810
811 // We have a live slot, verify index.
812 auto it = peers.find(s.getPeerId());
813 if (it == peers.end() || it->index != i) {
814 return false;
815 }
816
817 // Accumulate score across slots
818 scoreFromSlots += slots[i].getScore();
819 }
820
821 // Score across slots must be the same as our allocated score
822 if (scoreFromSlots != connectedPeersScore) {
823 return false;
824 }
825
826 uint32_t scoreFromAllPeers = 0;
827 uint32_t scoreFromPeersWithNodes = 0;
828
829 std::unordered_set<COutPoint, SaltedOutpointHasher> peersUtxos;
830 for (const auto &p : peers) {
831 // Accumulate the score across peers to compare with total known score
832 scoreFromAllPeers += p.getScore();
833
834 // A peer should have a proof attached
835 if (!p.proof) {
836 return false;
837 }
838
839 // Check proof pool consistency
840 for (const auto &ss : p.proof->getStakes()) {
841 const COutPoint &outpoint = ss.getStake().getUTXO();
842 auto proof = validProofPool.getProof(outpoint);
843
844 if (!proof) {
845 // Missing utxo
846 return false;
847 }
848 if (proof != p.proof) {
849 // Wrong proof
850 return false;
851 }
852
853 if (!peersUtxos.emplace(outpoint).second) {
854 // Duplicated utxo
855 return false;
856 }
857 }
858
859 // Count node attached to this peer.
860 const auto count_nodes = [&]() {
861 size_t count = 0;
862 auto &nview = nodes.get<next_request_time>();
863 auto begin = nview.lower_bound(
864 boost::make_tuple(p.peerid, SteadyMilliseconds()));
865 auto end = nview.upper_bound(
866 boost::make_tuple(p.peerid + 1, SteadyMilliseconds()));
867
868 for (auto it = begin; it != end; ++it) {
869 count++;
870 }
871
872 return count;
873 };
874
875 if (p.node_count != count_nodes()) {
876 return false;
877 }
878
879 // If there are no nodes attached to this peer, then we are done.
880 if (p.node_count == 0) {
881 continue;
882 }
883
884 scoreFromPeersWithNodes += p.getScore();
885 // The index must point to a slot refering to this peer.
886 if (p.index >= slots.size() || slots[p.index].getPeerId() != p.peerid) {
887 return false;
888 }
889
890        // If the scores do not match, same thing.
891 if (slots[p.index].getScore() != p.getScore()) {
892 return false;
893 }
894
895 // Check the proof is in the radix tree only if there are nodes attached
896 if (((localProof && p.getProofId() == localProof->getId()) ||
897 p.node_count > 0) &&
898 shareableProofs.get(p.getProofId()) == nullptr) {
899 return false;
900 }
901 if (p.node_count == 0 &&
902 shareableProofs.get(p.getProofId()) != nullptr) {
903 return false;
904 }
905 }
906
907    // Check our accumulated scores against our registered and allocated scores
908 if (scoreFromAllPeers != totalPeersScore) {
909 return false;
910 }
911 if (scoreFromPeersWithNodes != connectedPeersScore) {
912 return false;
913 }
914
915    // We checked the utxo consistency for all our peers' utxos already, so if
916 // the pool size differs from the expected one there are dangling utxos.
917 if (validProofPool.size() != peersUtxos.size()) {
918 return false;
919 }
920
921 // Check there is no dangling proof in the radix tree
922    return shareableProofs.forEachLeaf([&](RCUPtr<const Proof> pLeaf) {
923        return isBoundToPeer(pLeaf->getId());
924 });
925}
926
927PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
928 const uint64_t max) {
929 assert(slot <= max);
930
931 size_t begin = 0, end = slots.size();
932 uint64_t bottom = 0, top = max;
933
934 // Try to find the slot using dichotomic search.
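    // The guess below interpolates the position: e.g. if the slots cover
    // [0, max) roughly evenly, a slot value near max / 2 yields an index
    // close to the middle of the vector, and the [begin, end) and
    // [bottom, top) bounds are narrowed on every iteration.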
935 while ((end - begin) > 8) {
936        // The slot we picked is not allocated.
937 if (slot < bottom || slot >= top) {
938 return NO_PEER;
939 }
940
941 // Guesstimate the position of the slot.
942 size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom));
943 assert(begin <= i && i < end);
944
945 // We have a match.
946 if (slots[i].contains(slot)) {
947 return slots[i].getPeerId();
948 }
949
950        // We undershot.
951 if (slots[i].precedes(slot)) {
952 begin = i + 1;
953 if (begin >= end) {
954 return NO_PEER;
955 }
956
957 bottom = slots[begin].getStart();
958 continue;
959 }
960
961        // We overshot.
962 if (slots[i].follows(slot)) {
963 end = i;
964 top = slots[end].getStart();
965 continue;
966 }
967
968        // We have an unallocated slot.
969 return NO_PEER;
970 }
971
972    // Enough of that nonsense, let's fall back to linear search.
973 for (size_t i = begin; i < end; i++) {
974 // We have a match.
975 if (slots[i].contains(slot)) {
976 return slots[i].getPeerId();
977 }
978 }
979
980 // We failed to find a slot, retry.
981 return NO_PEER;
982}
983
984void PeerManager::addUnbroadcastProof(const ProofId &proofid) {
985    // The proof should be bound to a peer
986 if (isBoundToPeer(proofid)) {
987 m_unbroadcast_proofids.insert(proofid);
988 }
989}
990
991void PeerManager::removeUnbroadcastProof(const ProofId &proofid) {
992    m_unbroadcast_proofids.erase(proofid);
993}
994
995bool PeerManager::selectStakingRewardWinner(
996    const CBlockIndex *pprev,
997 std::vector<std::pair<ProofId, CScript>> &winners) {
998 if (!pprev) {
999 return false;
1000 }
1001
1002 // Don't select proofs that have not been known for long enough, i.e. at
1003 // least since twice the dangling proof cleanup timeout before the last
1004 // block time, so we're sure to not account for proofs more recent than the
1005    // previous block or lacking a connected node.
1006 // The previous block time is capped to now for the unlikely event the
1007 // previous block time is in the future.
1008 auto registrationDelay = std::chrono::duration_cast<std::chrono::seconds>(
1010 auto maxRegistrationDelay =
1011 std::chrono::duration_cast<std::chrono::seconds>(
1013 auto minRegistrationDelay =
1014 std::chrono::duration_cast<std::chrono::seconds>(
1016
1017 const int64_t refTime = std::min(pprev->GetBlockTime(), GetTime());
1018
1019 const int64_t targetRegistrationTime = refTime - registrationDelay.count();
1020 const int64_t maxRegistrationTime = refTime - minRegistrationDelay.count();
1021 const int64_t minRegistrationTime = refTime - maxRegistrationDelay.count();
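    // To summarize: proofs registered at or after maxRegistrationTime are
    // skipped, a winner must have been registered before
    // targetRegistrationTime, and the selection loop below stops once it
    // reaches a non-flaky proof registered before minRegistrationTime.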
1022
1023 const BlockHash prevblockhash = pprev->GetBlockHash();
1024
1025 std::vector<ProofRef> selectedProofs;
1026 ProofRef firstCompliantProof = ProofRef();
1027 while (selectedProofs.size() < peers.size()) {
1028 double bestRewardRank = std::numeric_limits<double>::max();
1029 ProofRef selectedProof = ProofRef();
1030 int64_t selectedProofRegistrationTime{0};
1031 uint256 bestRewardHash;
1032
1033 for (const Peer &peer : peers) {
1034 if (!peer.proof) {
1035 // Should never happen, continue
1036 continue;
1037 }
1038
1039 if (!peer.hasFinalized ||
1040 peer.registration_time.count() >= maxRegistrationTime) {
1041 continue;
1042 }
1043
1044 if (std::find_if(selectedProofs.begin(), selectedProofs.end(),
1045 [&peer](const ProofRef &proof) {
1046 return peer.getProofId() == proof->getId();
1047 }) != selectedProofs.end()) {
1048 continue;
1049 }
1050
1051 StakeContenderId proofRewardHash(prevblockhash, peer.getProofId());
1052 if (proofRewardHash == uint256::ZERO) {
1053            // This is either the result of an incredibly unlikely lucky
1054            // hash, or the hash is getting abused. In this case, skip the
1055 // proof.
1056 LogPrintf(
1057 "Staking reward hash has a suspicious value of zero for "
1058 "proof %s and blockhash %s, skipping\n",
1059 peer.getProofId().ToString(), prevblockhash.ToString());
1060 continue;
1061 }
1062
1063 // The best ranking is the lowest ranking value
1064 double proofRewardRank =
1065 proofRewardHash.ComputeProofRewardRank(peer.getScore());
1066 if (proofRewardRank < bestRewardRank) {
1067 bestRewardRank = proofRewardRank;
1068 selectedProof = peer.proof;
1069 selectedProofRegistrationTime = peer.registration_time.count();
1070 bestRewardHash = proofRewardHash;
1071 }
1072
1073 // Select the lowest reward hash then proofid in the unlikely case
1074 // of a collision.
1075 if (proofRewardRank == bestRewardRank &&
1076 (proofRewardHash < bestRewardHash ||
1077 (proofRewardHash == bestRewardHash &&
1078 peer.getProofId() < selectedProof->getId()))) {
1079 selectedProof = peer.proof;
1080 selectedProofRegistrationTime = peer.registration_time.count();
1081 bestRewardHash = proofRewardHash;
1082 }
1083 }
1084
1085 if (!selectedProof) {
1086 // No winner
1087 break;
1088 }
1089
1090 if (!firstCompliantProof &&
1091 selectedProofRegistrationTime < targetRegistrationTime) {
1092 firstCompliantProof = selectedProof;
1093 }
1094
1095 selectedProofs.push_back(selectedProof);
1096
1097 if (selectedProofRegistrationTime < minRegistrationTime &&
1098 !isFlaky(selectedProof->getId())) {
1099 break;
1100 }
1101 }
1102
1103 winners.clear();
1104
1105 if (!firstCompliantProof) {
1106 return false;
1107 }
1108
1109 winners.reserve(selectedProofs.size());
1110
1111 // Find the winner
1112 for (const ProofRef &proof : selectedProofs) {
1113 if (proof->getId() == firstCompliantProof->getId()) {
1114 winners.push_back({proof->getId(), proof->getPayoutScript()});
1115 }
1116 }
1117 // Add the others (if any) after the winner
1118 for (const ProofRef &proof : selectedProofs) {
1119 if (proof->getId() != firstCompliantProof->getId()) {
1120 winners.push_back({proof->getId(), proof->getPayoutScript()});
1121 }
1122 }
1123
1124 return true;
1125}
1126
1127bool PeerManager::setFlaky(const ProofId &proofid) {
1128 return manualFlakyProofids.insert(proofid).second;
1129}
1130
1131bool PeerManager::unsetFlaky(const ProofId &proofid) {
1132 return manualFlakyProofids.erase(proofid) > 0;
1133}
1134
1135bool PeerManager::isFlaky(const ProofId &proofid) const {
1136 if (localProof && proofid == localProof->getId()) {
1137 return false;
1138 }
1139
1140 if (manualFlakyProofids.count(proofid) > 0) {
1141 return true;
1142 }
1143
1144 // If we are missing connection to this proof, consider flaky
1145 if (forPeer(proofid,
1146 [](const Peer &peer) { return peer.node_count == 0; })) {
1147 return true;
1148 }
1149
1150 auto &remoteProofsByNodeId = remoteProofs.get<by_nodeid>();
1151 auto &nview = nodes.get<next_request_time>();
1152
1153 std::unordered_map<PeerId, std::unordered_set<ProofId, SaltedProofIdHasher>>
1154 missing_per_peer;
1155
1156 // Construct a set of missing proof ids per peer
1157 double total_score{0};
1158 for (const Peer &peer : peers) {
1159 const PeerId peerid = peer.peerid;
1160
1161 total_score += peer.getScore();
1162
1163 auto nodes_range = nview.equal_range(peerid);
1164 for (auto &nit = nodes_range.first; nit != nodes_range.second; ++nit) {
1165 auto proofs_range = remoteProofsByNodeId.equal_range(nit->nodeid);
1166 for (auto &proofit = proofs_range.first;
1167 proofit != proofs_range.second; ++proofit) {
1168 if (!proofit->present) {
1169 missing_per_peer[peerid].insert(proofit->proofid);
1170 }
1171 }
1172 };
1173 }
1174
1175 double missing_score{0};
1176
1177 // Now compute a score for the missing proof
1178 for (const auto &[peerid, missingProofs] : missing_per_peer) {
1179 if (missingProofs.size() > 3) {
1180 // Ignore peers with too many missing proofs
1181 continue;
1182 }
1183
1184 auto pit = peers.find(peerid);
1185 if (pit == peers.end()) {
1186 // Peer not found
1187 continue;
1188 }
1189
1190 if (missingProofs.count(proofid) > 0) {
1191 missing_score += pit->getScore();
1192 }
1193 }
1194
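    // e.g. if the peers reporting this proof as missing account for 40 out
    // of 100 total score points, the ratio is 0.4 > 0.3 and the proof is
    // considered flaky.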
1195 return (missing_score / total_score) > 0.3;
1196}
1197
1198std::optional<bool>
1199PeerManager::getRemotePresenceStatus(const ProofId &proofid) const {
1200    auto &remoteProofsView = remoteProofs.get<by_proofid>();
1201 auto [begin, end] = remoteProofsView.equal_range(proofid);
1202
1203 if (begin == end) {
1204 // No remote registered anything yet, we are on our own
1205 return std::nullopt;
1206 }
1207
1208 double total_score{0};
1209 double present_score{0};
1210 double missing_score{0};
1211
1212 for (auto it = begin; it != end; it++) {
1213 auto nit = nodes.find(it->nodeid);
1214 if (nit == nodes.end()) {
1215 // No such node
1216 continue;
1217 }
1218
1219 const PeerId peerid = nit->peerid;
1220
1221 auto pit = peers.find(peerid);
1222 if (pit == peers.end()) {
1223 // Peer not found
1224 continue;
1225 }
1226
1227 uint32_t node_count = pit->node_count;
1228 if (localProof && pit->getProofId() == localProof->getId()) {
1229        // If that's our local proof, account for ourselves
1230 ++node_count;
1231 }
1232
1233 if (node_count == 0) {
1234 // should never happen
1235 continue;
1236 }
1237
1238 const double score = double(pit->getScore()) / node_count;
1239
1240 total_score += score;
1241 if (it->present) {
1242 present_score += score;
1243 } else {
1244 missing_score += score;
1245 }
1246 }
1247
1248 if (localProof) {
1249 auto &peersByProofid = peers.get<by_proofid>();
1250
1251        // Do we have a node connected for that proof?
1252 bool present = false;
1253 auto pit = peersByProofid.find(proofid);
1254 if (pit != peersByProofid.end()) {
1255 present = pit->node_count > 0;
1256 }
1257
1258 pit = peersByProofid.find(localProof->getId());
1259 if (pit != peersByProofid.end()) {
1260 // Also divide by node_count, we can have several nodes even for our
1261 // local proof.
1262 const double score =
1263 double(pit->getScore()) / (1 + pit->node_count);
1264
1265 total_score += score;
1266 if (present) {
1267 present_score += score;
1268 } else {
1269 missing_score += score;
1270 }
1271 }
1272 }
1273
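    // The remote status is only conclusive when one side gathers more than
    // 55% of the score accounted for above, e.g. a proof reported present by
    // remotes weighing 60 out of 100 score points is considered present.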
1274 if (present_score / total_score > 0.55) {
1275 return std::make_optional(true);
1276 }
1277
1278 if (missing_score / total_score > 0.55) {
1279 return std::make_optional(false);
1280 }
1281
1282 return std::nullopt;
1283}
1284
1285bool PeerManager::dumpPeersToFile(const fs::path &dumpPath) const {
1286 try {
1287 const fs::path dumpPathTmp = dumpPath + ".new";
1288 FILE *filestr = fsbridge::fopen(dumpPathTmp, "wb");
1289 if (!filestr) {
1290 return false;
1291 }
1292
1293 CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
1294 file << PEERS_DUMP_VERSION;
1295 file << uint64_t(peers.size());
1296 for (const Peer &peer : peers) {
1297 file << peer.proof;
1298 file << peer.hasFinalized;
1299 file << int64_t(peer.registration_time.count());
1300 file << int64_t(peer.nextPossibleConflictTime.count());
1301 }
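        // Resulting file layout: PEERS_DUMP_VERSION, the number of peers,
        // then for each peer its proof, finalization status, registration
        // time and next possible conflict time.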
1302
1303 if (!FileCommit(file.Get())) {
1304 throw std::runtime_error(strprintf("Failed to commit to file %s",
1305 PathToString(dumpPathTmp)));
1306 }
1307 file.fclose();
1308
1309 if (!RenameOver(dumpPathTmp, dumpPath)) {
1310 throw std::runtime_error(strprintf("Rename failed from %s to %s",
1311 PathToString(dumpPathTmp),
1312 PathToString(dumpPath)));
1313 }
1314 } catch (const std::exception &e) {
1315 LogPrint(BCLog::AVALANCHE, "Failed to dump the avalanche peers: %s.\n",
1316 e.what());
1317 return false;
1318 }
1319
1320 LogPrint(BCLog::AVALANCHE, "Successfully dumped %d peers to %s.\n",
1321 peers.size(), PathToString(dumpPath));
1322
1323 return true;
1324}
1325
1326bool PeerManager::loadPeersFromFile(
1327    const fs::path &dumpPath,
1328 std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
1329 registeredProofs.clear();
1330
1331 FILE *filestr = fsbridge::fopen(dumpPath, "rb");
1332 CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
1333 if (file.IsNull()) {
1334        LogPrint(BCLog::AVALANCHE,
1335                 "Failed to open avalanche peers file from disk.\n");
1336 return false;
1337 }
1338
1339 try {
1340 uint64_t version;
1341 file >> version;
1342
1343 if (version != PEERS_DUMP_VERSION) {
1344            LogPrint(BCLog::AVALANCHE,
1345                     "Unsupported avalanche peers file version.\n");
1346 return false;
1347 }
1348
1349 uint64_t numPeers;
1350 file >> numPeers;
1351
1352 auto &peersByProofId = peers.get<by_proofid>();
1353
1354 for (uint64_t i = 0; i < numPeers; i++) {
1355 ProofRef proof;
1356 bool hasFinalized;
1357 int64_t registrationTime;
1358 int64_t nextPossibleConflictTime;
1359
1360 file >> proof;
1361 file >> hasFinalized;
1362 file >> registrationTime;
1363 file >> nextPossibleConflictTime;
1364
1365 if (registerProof(proof)) {
1366 auto it = peersByProofId.find(proof->getId());
1367 if (it == peersByProofId.end()) {
1368 // Should never happen
1369 continue;
1370 }
1371
1372 // We don't modify any key so we don't need to rehash.
1373 // If the modify fails, it means we don't get the full benefit
1374 // from the file but we still added our peer to the set. The
1375 // non-overridden fields will be set the normal way.
1376 peersByProofId.modify(it, [&](Peer &p) {
1377 p.hasFinalized = hasFinalized;
1378                    p.registration_time =
1379                        std::chrono::seconds{registrationTime};
1380                    p.nextPossibleConflictTime =
1381                        std::chrono::seconds{nextPossibleConflictTime};
1382 });
1383
1384 registeredProofs.insert(proof);
1385 }
1386 }
1387 } catch (const std::exception &e) {
1388        LogPrint(BCLog::AVALANCHE,
1389                 "Failed to read the avalanche peers file data on disk: %s.\n",
1390 e.what());
1391 return false;
1392 }
1393
1394 return true;
1395}
1396
1397} // namespace avalanche
ArgsManager gArgs
Definition: args.cpp:38
static constexpr PeerId NO_PEER
Definition: node.h:16
uint32_t PeerId
Definition: node.h:15
static constexpr size_t AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN
Conflicting proofs cooldown time default value in seconds.
Definition: avalanche.h:28
int64_t GetIntArg(const std::string &strArg, int64_t nDefault) const
Return integer argument or default value.
Definition: args.cpp:526
bool IsNull() const
Return true if the wrapped FILE* is nullptr, false otherwise.
Definition: streams.h:570
FILE * Get() const
Get wrapped FILE* without transfer of ownership.
Definition: streams.h:567
int fclose()
Definition: streams.h:541
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition: blockindex.h:25
int64_t GetBlockTime() const
Definition: blockindex.h:180
BlockHash GetBlockHash() const
Definition: blockindex.h:146
void insert(Span< const uint8_t > vKey)
Definition: bloom.cpp:215
bool contains(Span< const uint8_t > vKey) const
Definition: bloom.cpp:249
bool Invalid(Result result, const std::string &reject_reason="", const std::string &debug_message="")
Definition: validation.h:101
Result GetResult() const
Definition: validation.h:122
std::string ToString() const
Definition: validation.h:125
bool selectStakingRewardWinner(const CBlockIndex *pprev, std::vector< std::pair< ProofId, CScript > > &winners)
Deterministically select a list of payout scripts based on the proof set and the previous block hash.
uint32_t connectedPeersScore
Definition: peermanager.h:239
std::vector< RemoteProof > getRemoteProofs(const NodeId nodeid) const
bool removeNode(NodeId nodeid)
bool setFinalized(PeerId peerid)
Latch on that this peer has a finalized proof.
bool dumpPeersToFile(const fs::path &dumpPath) const
RemoteProofSet remoteProofs
Remember which node sent which proof so we have an image of the proof set of our peers.
Definition: peermanager.h:281
bool isDangling(const ProofId &proofid) const
bool updateNextRequestTime(NodeId nodeid, SteadyMilliseconds timeout)
bool unsetFlaky(const ProofId &proofid)
std::optional< bool > getRemotePresenceStatus(const ProofId &proofid) const
Get the presence remote status of a proof.
bool addNodeToPeer(const PeerSet::iterator &it)
Definition: peermanager.cpp:87
bool exists(const ProofId &proofid) const
Return true if the (valid) proof exists, but only for non-dangling proofs.
Definition: peermanager.h:406
bool isRemoteProof(const ProofId &proofid) const
PendingNodeSet pendingNodes
Definition: peermanager.h:225
bool verify() const
Perform consistency check on internal data structures.
bool forPeer(const ProofId &proofid, Callable &&func) const
Definition: peermanager.h:414
bool latchAvaproofsSent(NodeId nodeid)
Flag that a node did send its compact proofs.
bool addNode(NodeId nodeid, const ProofId &proofid)
Node API.
Definition: peermanager.cpp:31
static constexpr int SELECT_PEER_MAX_RETRY
Definition: peermanager.h:227
ProofIdSet m_unbroadcast_proofids
Track proof ids to broadcast.
Definition: peermanager.h:233
bool loadPeersFromFile(const fs::path &dumpPath, std::unordered_set< ProofRef, SaltedProofHasher > &registeredProofs)
RejectionMode
Rejection mode.
Definition: peermanager.h:394
void addUnbroadcastProof(const ProofId &proofid)
Proof broadcast API.
std::unordered_set< ProofRef, SaltedProofHasher > updatedBlockTip()
Update the peer set when a new block is connected.
void removeUnbroadcastProof(const ProofId &proofid)
bool isBoundToPeer(const ProofId &proofid) const
ProofRadixTree shareableProofs
Definition: peermanager.h:191
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
CRollingBloomFilter invalidProofs
Filter for proofs that are consensus-invalid or were recently invalidated by avalanche (finalized rej...
Definition: peermanager.h:295
uint64_t compact()
Trigger maintenance of internal data structures.
std::vector< Slot > slots
Definition: peermanager.h:163
uint32_t totalPeersScore
Quorum management.
Definition: peermanager.h:238
ProofPool danglingProofPool
Definition: peermanager.h:188
void setInvalid(const ProofId &proofid)
bool isFlaky(const ProofId &proofid) const
ChainstateManager & chainman
Definition: peermanager.h:243
bool isInvalid(const ProofId &proofid) const
std::unordered_set< ProofId, SaltedProofIdHasher > manualFlakyProofids
Definition: peermanager.h:297
bool removePeer(const PeerId peerid)
Remove an existing peer.
bool isImmature(const ProofId &proofid) const
bool addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid)
Definition: peermanager.cpp:46
bool rejectProof(const ProofId &proofid, RejectionMode mode=RejectionMode::DEFAULT)
ProofPool immatureProofPool
Definition: peermanager.h:187
RegistrationMode
Registration mode.
Definition: peermanager.h:371
ProofPool conflictingProofPool
Definition: peermanager.h:186
static constexpr size_t MAX_REMOTE_PROOFS
Definition: peermanager.h:300
bool setFlaky(const ProofId &proofid)
std::atomic< bool > needMoreNodes
Flag indicating that we failed to select a node and need to expand our node set.
Definition: peermanager.h:211
PeerId selectPeer() const
Randomly select a peer to poll.
bool isInConflictingPool(const ProofId &proofid) const
static constexpr int SELECT_NODE_MAX_RETRY
Definition: peermanager.h:228
void cleanupDanglingProofs(std::unordered_set< ProofRef, SaltedProofHasher > &registeredProofs)
ProofRef getProof(const ProofId &proofid) const
bool registerProof(const ProofRef &proof, ProofRegistrationState &registrationState, RegistrationMode mode=RegistrationMode::DEFAULT)
bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count=1)
bool updateNextPossibleConflictTime(PeerId peerid, const std::chrono::seconds &nextTime)
Proof and Peer related API.
void moveToConflictingPool(const ProofContainer &proofs)
AddProofStatus addProofIfPreferred(const ProofRef &proof, ConflictingProofSet &conflictingProofs)
Attempt to add a proof to the pool.
Definition: proofpool.cpp:54
size_t size() const
Definition: proofpool.h:135
AddProofStatus addProofIfNoConflict(const ProofRef &proof, ConflictingProofSet &conflictingProofs)
Attempt to add a proof to the pool, and fail if there is a conflict on any UTXO.
Definition: proofpool.cpp:13
size_t countProofs() const
Definition: proofpool.cpp:129
bool removeProof(ProofId proofid)
Definition: proofpool.cpp:79
void forEachProof(Callable &&func) const
Definition: proofpool.h:118
ProofRef getProof(const ProofId &proofid) const
Definition: proofpool.cpp:112
std::set< ProofRef, ConflictingProofComparator > ConflictingProofSet
Definition: proofpool.h:88
ProofRef getLowestScoreProof() const
Definition: proofpool.cpp:123
std::unordered_set< ProofRef, SaltedProofHasher > rescan(PeerManager &peerManager)
Definition: proofpool.cpp:86
std::string ToString() const
Definition: uint256.h:80
Path class wrapper to block calls to the fs::path(std::string) implicit constructor and the fs::path:...
Definition: fs.h:30
256-bit opaque blob.
Definition: uint256.h:129
static const uint256 ZERO
Definition: uint256.h:134
static constexpr int CLIENT_VERSION
bitcoind-res.rc includes this file, but it cannot cope with real c++ code.
Definition: clientversion.h:38
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition: cs_main.cpp:7
bool RenameOver(fs::path src, fs::path dest)
Definition: fs_helpers.cpp:272
bool FileCommit(FILE *file)
Ensure file contents are fully committed to disk, using a platform-specific feature analogous to fsyn...
Definition: fs_helpers.cpp:125
#define LogPrint(category,...)
Definition: logging.h:238
#define LogPrintf(...)
Definition: logging.h:227
@ AVALANCHE
Definition: logging.h:62
ProofRegistrationResult
Definition: peermanager.h:145
static constexpr uint32_t AVALANCHE_MAX_IMMATURE_PROOFS
Maximum number of immature proofs the peer manager will accept from the network.
Definition: peermanager.h:44
static bool isImmatureState(const ProofValidationState &state)
static constexpr uint64_t PEERS_DUMP_VERSION
Definition: peermanager.cpp:29
PeerId selectPeerImpl(const std::vector< Slot > &slots, const uint64_t slot, const uint64_t max)
Internal methods that are exposed for testing purposes.
RCUPtr< const Proof > ProofRef
Definition: proof.h:185
static std::string PathToString(const path &path)
Convert path object to byte string.
Definition: fs.h:142
FILE * fopen(const fs::path &p, const char *mode)
Definition: fs.cpp:30
static constexpr NodeId NO_NODE
Special NodeId that represent no node.
Definition: nodeid.h:15
int64_t NodeId
Definition: nodeid.h:10
T GetRand(T nMax=std::numeric_limits< T >::max()) noexcept
Generate a uniform random integer of type T in the range [0..nMax) nMax defaults to std::numeric_limi...
Definition: random.h:85
@ SER_DISK
Definition: serialize.h:153
A BlockHash is a unique identifier for a block.
Definition: blockhash.h:13
RCUPtr< T > remove(const KeyType &key)
Remove an element from the tree.
Definition: radix.h:181
RCUPtr< T > get(const KeyType &key)
Get the value corresponding to a key.
Definition: radix.h:118
bool forEachLeaf(Callable &&func) const
Definition: radix.h:144
bool insert(const RCUPtr< T > &value)
Insert a value into the tree.
Definition: radix.h:112
Facility for using an uint256 as a radix tree key.
SteadyMilliseconds nextRequestTime
Definition: node.h:23
bool avaproofsSent
Definition: node.h:24
std::chrono::seconds registration_time
Definition: peermanager.h:93
std::chrono::seconds nextPossibleConflictTime
Definition: peermanager.h:94
uint32_t node_count
Definition: peermanager.h:87
static constexpr auto DANGLING_TIMEOUT
Consider dropping the peer if no node is attached after this timeout expired.
Definition: peermanager.h:102
uint32_t index
Definition: peermanager.h:86
uint32_t getScore() const
Definition: peermanager.h:111
ProofRef proof
Definition: peermanager.h:89
uint64_t getStop() const
Definition: peermanager.h:73
uint64_t getStart() const
Definition: peermanager.h:72
PeerId getPeerId() const
Definition: peermanager.h:75
StakeContenderIds are unique for each block to ensure that the peer polling for their acceptance has ...
double ComputeProofRewardRank(uint32_t proofScore)
To make sure the selection is properly weighted according to the proof score, we normalize the conten...
#define LOCK(cs)
Definition: sync.h:306
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:357
static int count
Definition: tests.c:31
#define NO_THREAD_SAFETY_ANALYSIS
Definition: threadsafety.h:58
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
Definition: time.cpp:109
std::chrono::time_point< std::chrono::steady_clock, std::chrono::milliseconds > SteadyMilliseconds
Definition: time.h:31
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1202
AssertLockHeld(pool.cs)
assert(!tx.IsCoinBase())