Bitcoin ABC 0.32.12
P2P Digital Currency
peermanager.cpp
Go to the documentation of this file.
1// Copyright (c) 2020 The Bitcoin developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
6
12#include <cashaddrenc.h>
13#include <common/args.h>
15#include <logging.h>
16#include <random.h>
17#include <scheduler.h>
18#include <threadsafety.h>
19#include <uint256.h>
20#include <util/fastrange.h>
21#include <util/fs_helpers.h>
22#include <util/strencodings.h>
23#include <util/time.h>
24#include <validation.h> // For ChainstateManager
25
26#include <algorithm>
27#include <cassert>
28#include <limits>
29
30namespace avalanche {
31static constexpr uint64_t PEERS_DUMP_VERSION{1};
32
33bool PeerManager::addNode(NodeId nodeid, const ProofId &proofid) {
34 auto &pview = peers.get<by_proofid>();
35 auto it = pview.find(proofid);
36 if (it == pview.end()) {
37 // If the node exists, it is actually updating its proof to an unknown
38 // one. In this case we need to remove it so it is not both active and
39 // pending at the same time.
40 removeNode(nodeid);
41 pendingNodes.emplace(proofid, nodeid);
42 return false;
43 }
44
45 return addOrUpdateNode(peers.project<0>(it), nodeid);
46}
47
48bool PeerManager::addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid) {
49 assert(it != peers.end());
50
51 const PeerId peerid = it->peerid;
52
53 auto nit = nodes.find(nodeid);
54 if (nit == nodes.end()) {
55 if (!nodes.emplace(nodeid, peerid).second) {
56 return false;
57 }
58 } else {
59 const PeerId oldpeerid = nit->peerid;
60 if (!nodes.modify(nit, [&](Node &n) { n.peerid = peerid; })) {
61 return false;
62 }
63
64 // We actually have this node already, we need to update it.
65 bool success = removeNodeFromPeer(peers.find(oldpeerid));
66 assert(success);
67 }
68
69 // Then increase the node counter, and create the slot if needed
70 bool success = addNodeToPeer(it);
71 assert(success);
72
73 // If the added node was in the pending set, remove it
74 pendingNodes.get<by_nodeid>().erase(nodeid);
75
76 // If the proof was in the dangling pool, remove it
77 const ProofId &proofid = it->getProofId();
78 if (danglingProofPool.getProof(proofid)) {
80 }
81
82 // We know for sure there is at least 1 node. Note that this can fail if
83 // there is more than 1, in this case it's a no-op.
84 shareableProofs.insert(it->proof);
85
86 return true;
87}
88
89bool PeerManager::addNodeToPeer(const PeerSet::iterator &it) {
90 assert(it != peers.end());
91 return peers.modify(it, [&](Peer &p) {
92 if (p.node_count++ > 0) {
93 // We are done.
94 return;
95 }
96
97 // We need to allocate this peer.
98 p.index = uint32_t(slots.size());
99 const uint32_t score = p.getScore();
100 const uint64_t start = slotCount;
101 slots.emplace_back(start, score, it->peerid);
102 slotCount = start + score;
103
104 // Add to our allocated score when we allocate a new peer in the slots
105 connectedPeersScore += score;
106 });
107}
108
110 // Remove all the remote proofs from this node
111 auto &remoteProofsView = remoteProofs.get<by_nodeid>();
112 auto [begin, end] = remoteProofsView.equal_range(nodeid);
113 remoteProofsView.erase(begin, end);
114
115 if (pendingNodes.get<by_nodeid>().erase(nodeid) > 0) {
116 // If this was a pending node, there is nothing else to do.
117 return true;
118 }
119
120 auto it = nodes.find(nodeid);
121 if (it == nodes.end()) {
122 return false;
123 }
124
125 const PeerId peerid = it->peerid;
126 nodes.erase(it);
127
128 // Keep the track of the reference count.
129 bool success = removeNodeFromPeer(peers.find(peerid));
130 assert(success);
131
132 return true;
133}
134
135bool PeerManager::removeNodeFromPeer(const PeerSet::iterator &it,
136 uint32_t count) {
137 // It is possible for nodes to be dangling. If there was an inflight query
138 // when the peer gets removed, the node was not erased. In this case there
139 // is nothing to do.
140 if (it == peers.end()) {
141 return true;
142 }
143
144 assert(count <= it->node_count);
145 if (count == 0) {
146 // This is a NOOP.
147 return false;
148 }
149
150 const uint32_t new_count = it->node_count - count;
151 if (!peers.modify(it, [&](Peer &p) { p.node_count = new_count; })) {
152 return false;
153 }
154
155 if (new_count > 0) {
156 // We are done.
157 return true;
158 }
159
160 // There are no more nodes left, we need to clean up. Remove from the radix
161 // tree (unless it's our local proof), subtract allocated score and remove
162 // from slots.
163 if (!localProof || it->getProofId() != localProof->getId()) {
164 const auto removed = shareableProofs.remove(it->getProofId());
165 assert(removed);
166 }
167
168 const size_t i = it->index;
169 assert(i < slots.size());
170 assert(connectedPeersScore >= slots[i].getScore());
171 connectedPeersScore -= slots[i].getScore();
172
173 if (i + 1 == slots.size()) {
174 slots.pop_back();
175 slotCount = slots.empty() ? 0 : slots.back().getStop();
176 } else {
177 fragmentation += slots[i].getScore();
178 slots[i] = slots[i].withPeerId(NO_PEER);
179 }
180
181 return true;
182}
183
// NOTE(review): the first line of this definition — the function name and
// its NodeId parameter, presumably something like
// `bool PeerManager::updateNextRequestTime(NodeId nodeid,` — was lost in
// extraction. Confirm the exact name against upstream.
                                        SteadyMilliseconds timeout,
                                        uint64_t round) {
    auto it = nodes.find(nodeid);
    if (it == nodes.end()) {
        return false;
    }

    // Record when this node may be queried again and which round the pending
    // request belongs to; modify() reports whether the update succeeded.
    return nodes.modify(it, [&](Node &n) {
        n.nextRequestTime = timeout;
        n.last_round = round;
    });
}
197
// NOTE(review): the signature line was lost in extraction — the function
// takes a NodeId (used below) and a query `response`; confirm the exact name
// against upstream.
                                  const Response &response) {
    auto it = nodes.find(nodeid);
    if (it == nodes.end()) {
        return false;
    }

    if (it->last_round > response.getRound()) {
        // This is a response for a previous round, ignore it.
        return false;
    }

    // Honor the cooldown advertised by the node before it may be queried
    // again.
    auto timeout = Now<SteadyMilliseconds>() +
                   std::chrono::milliseconds(response.getCooldown());

    return nodes.modify(it, [&](Node &n) {
        n.nextRequestTime = timeout;
        n.last_round = response.getRound();
    });
}
218
220 auto it = nodes.find(nodeid);
221 if (it == nodes.end()) {
222 return false;
223 }
224
225 return !it->avaproofsSent &&
226 nodes.modify(it, [&](Node &n) { n.avaproofsSent = true; });
227}
228
229static bool isImmatureState(const ProofValidationState &state) {
231}
232
234 PeerId peerid, const std::chrono::seconds &nextTime) {
235 auto it = peers.find(peerid);
236 if (it == peers.end()) {
237 // No such peer
238 return false;
239 }
240
241 // Make sure we don't move the time in the past.
242 peers.modify(it, [&](Peer &p) {
244 std::max(p.nextPossibleConflictTime, nextTime);
245 });
246
247 return it->nextPossibleConflictTime == nextTime;
248}
249
251 auto it = peers.find(peerid);
252 if (it == peers.end()) {
253 // No such peer
254 return false;
255 }
256
257 peers.modify(it, [&](Peer &p) { p.hasFinalized = true; });
258
259 return true;
260}
261
262template <typename ProofContainer>
263void PeerManager::moveToConflictingPool(const ProofContainer &proofs) {
264 auto &peersView = peers.get<by_proofid>();
265 for (const ProofRef &proof : proofs) {
266 auto it = peersView.find(proof->getId());
267 if (it != peersView.end()) {
268 removePeer(it->peerid);
269 }
270
272 }
273}
274
// NOTE(review): this definition is damaged in the extracted source. The
// opening signature line (presumably
// `bool PeerManager::registerProof(const ProofRef &proof,`) and roughly a
// dozen interior lines were dropped; each gap is flagged with a
// NOTE(review) comment below. All surviving tokens are reproduced verbatim.
// Restore the missing lines from upstream before compiling.
                               ProofRegistrationState &registrationState,
                               RegistrationMode mode) {
    assert(proof);

    const ProofId &proofid = proof->getId();

    // Helper stamping the proofid into any failure we report.
    auto invalidate = [&](ProofRegistrationResult result,
                          const std::string &message) {
        return registrationState.Invalid(
            result, message, strprintf("proofid: %s", proofid.ToString()));
    };

    if ((mode != RegistrationMode::FORCE_ACCEPT ||
         !isInConflictingPool(proofid)) &&
        exists(proofid)) {
        // In default mode, we expect the proof to be unknown, i.e. in none of
        // the pools.
        // In forced accept mode, the proof can be in the conflicting pool.
        // NOTE(review): missing line (source 294) — presumably
        // `return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,`.
                          "proof-already-registered");
    }

    if (danglingProofPool.getProof(proofid) &&
        pendingNodes.count(proofid) == 0) {
        // Don't attempt to register a proof that we already evicted because it
        // was dangling, but rather attempt to retrieve an associated node.
        needMoreNodes = true;
        return invalidate(ProofRegistrationResult::DANGLING, "dangling-proof");
    }

    // Check the proof's validity.
    ProofValidationState validationState;
    if (!WITH_LOCK(cs_main, return proof->verify(stakeUtxoDustThreshold,
                                                 chainman, validationState))) {
        if (isImmatureState(validationState)) {
            // NOTE(review): missing lines (source 311-313) — the immature
            // pool insertion and its size check were dropped here.
                // Adding this proof exceeds the immature pool limit, so evict
                // the lowest scoring proof.
            // NOTE(review): missing lines (source 316-317) — the eviction
            // call was dropped.
            }

            return invalidate(ProofRegistrationResult::IMMATURE,
                              "immature-proof");
        }

        if (validationState.GetResult() ==
        // NOTE(review): missing lines (source 325-326) — the result
        // enumerator being compared and the invalidate() call prefix were
        // dropped.
                              "utxo-missing-or-spent");
        }

        // Reject invalid proof.
        return invalidate(ProofRegistrationResult::INVALID, "invalid-proof");
    }

    auto now = GetTime<std::chrono::seconds>();
    auto nextCooldownTimePoint =
        now + std::chrono::seconds(gArgs.GetIntArg(
                  "-avalancheconflictingproofcooldown",
    // NOTE(review): missing line (source 338) — the default cooldown
    // constant and the closing parentheses were dropped.

    ProofPool::ConflictingProofSet conflictingProofs;
    switch (validProofPool.addProofIfNoConflict(proof, conflictingProofs)) {
        case ProofPool::AddProofStatus::REJECTED: {
            if (mode != RegistrationMode::FORCE_ACCEPT) {
                auto bestPossibleConflictTime = std::chrono::seconds(0);
                auto &pview = peers.get<by_proofid>();
                for (auto &conflictingProof : conflictingProofs) {
                    auto it = pview.find(conflictingProof->getId());
                    assert(it != pview.end());

                    // Search the most recent time over the peers
                    bestPossibleConflictTime = std::max(
                        bestPossibleConflictTime, it->nextPossibleConflictTime);

                    // NOTE(review): missing line (source 354) — the call
                    // updating the peer's next possible conflict time.
                                                  nextCooldownTimePoint);
                }

                if (bestPossibleConflictTime > now) {
                    // Cooldown not elapsed, reject the proof.
                    return invalidate(
                        // NOTE(review): missing line (source 361) — the
                        // rejection result enumerator.
                        "cooldown-not-elapsed");
                }

                // Give the proof a chance to replace the conflicting ones.
                // NOTE(review): missing line (source 366) — the replacement
                // condition (likely an addProofIfPreferred() check).
                    // If we have overridden other proofs due to conflict,
                    // remove the peers and attempt to move them to the
                    // conflicting pool.
                    moveToConflictingPool(conflictingProofs);

                    // Replacement is successful, continue to peer creation
                    break;
                }

                // Not the preferred proof, or replacement is not enabled
                // NOTE(review): missing lines (source 377, 379, 381) — the
                // ternary returning the rejection reason was mangled.
                               ProofPool::AddProofStatus::REJECTED
                           "rejected-proof")
                           "conflicting-utxos");
            }

            // NOTE(review): missing line (source 385).

            // Move the conflicting proofs from the valid pool to the
            // conflicting pool
            moveToConflictingPool(conflictingProofs);

            auto status = validProofPool.addProofIfNoConflict(proof);
            assert(status == ProofPool::AddProofStatus::SUCCEED);

            break;
        }
        case ProofPool::AddProofStatus::DUPLICATED:
            // If the proof was already in the pool, don't duplicate the peer.
            // NOTE(review): missing line (source 398) — the invalidate()
            // call prefix.
                              "proof-already-registered");
        case ProofPool::AddProofStatus::SUCCEED:
            break;

            // No default case, so the compiler can warn about missing cases
    }

    // At this stage we are going to create a peer so the proof should never
    // exist in the conflicting pool, but use belt and suspenders.
    // NOTE(review): missing line (source 408) — presumably the conflicting
    // pool removal.

    // New peer means new peerid!
    const PeerId peerid = nextPeerId++;

    // We have no peer for this proof, time to create it.
    auto inserted = peers.emplace(peerid, proof, nextCooldownTimePoint);
    assert(inserted.second);

    if (localProof && proof->getId() == localProof->getId()) {
        // Add it to the shareable proofs even if there is no node, we are the
        // node. Otherwise it will be inserted after a node is attached to the
        // proof.
        shareableProofs.insert(proof);
    }

    // Add to our registered score when adding to the peer list
    totalPeersScore += proof->getScore();

    // If there are nodes waiting for this proof, add them
    auto &pendingNodesView = pendingNodes.get<by_proofid>();
    auto range = pendingNodesView.equal_range(proofid);

    // We want to update the nodes then remove them from the pending set. That
    // will invalidate the range iterators, so we need to save the node ids
    // first before we can loop over them.
    std::vector<NodeId> nodeids;
    nodeids.reserve(std::distance(range.first, range.second));
    std::transform(range.first, range.second, std::back_inserter(nodeids),
                   [](const PendingNode &n) { return n.nodeid; });

    for (const NodeId &nodeid : nodeids) {
        addOrUpdateNode(inserted.first, nodeid);
    }

    // NOTE(review): missing line (source 443) — presumably a staking
    // preconsensus activation guard.
        addStakeContender(proof);
    }

    return true;
}
449
// NOTE(review): the signature line (presumably
// `bool PeerManager::rejectProof(const ProofId &proofid, RejectionMode
// mode) {`) was lost in extraction, along with a few interior lines flagged
// below. Surviving tokens are reproduced verbatim.
    if (isDangling(proofid) && mode == RejectionMode::INVALIDATE) {
        // NOTE(review): missing line (source 452) — presumably the dangling
        // pool removal / invalidation.
        return true;
    }

    if (!exists(proofid)) {
        return false;
    }

    if (immatureProofPool.removeProof(proofid)) {
        return true;
    }

    if (mode == RejectionMode::DEFAULT &&
        // NOTE(review): missing line (source 465) — the conflicting-pool
        // membership check.
        // In default mode we keep the proof in the conflicting pool
        return true;
    }

    if (mode == RejectionMode::INVALIDATE &&
        // NOTE(review): missing line (source 471) — the conflicting-pool
        // removal check.
        // In invalidate mode we remove the proof completely
        return true;
    }

    auto &pview = peers.get<by_proofid>();
    auto it = pview.find(proofid);
    assert(it != pview.end());

    const ProofRef proof = it->proof;

    if (!removePeer(it->peerid)) {
        return false;
    }

    // If there was conflicting proofs, attempt to pull them back
    for (const SignedStake &ss : proof->getStakes()) {
        const ProofRef conflictingProof =
            conflictingProofPool.getProof(ss.getStake().getUTXO());
        if (!conflictingProof) {
            continue;
        }

        conflictingProofPool.removeProof(conflictingProof->getId());
        registerProof(conflictingProof);
    }

    if (mode == RejectionMode::DEFAULT) {
        // NOTE(review): missing line (source 499) — presumably moving the
        // rejected proof to the conflicting pool.
    }

    return true;
}
504
// NOTE(review): the opening signature line (presumably
// `void PeerManager::cleanupDanglingProofs(`) was lost in extraction, along
// with two lines flagged below.
    std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
    registeredProofs.clear();
    const auto now = GetTime<std::chrono::seconds>();

    std::vector<ProofRef> newlyDanglingProofs;
    for (const Peer &peer : peers) {
        // If the peer is not our local proof, has been registered for some
        // time and has no node attached, discard it.
        if ((!localProof || peer.getProofId() != localProof->getId()) &&
            peer.node_count == 0 &&
            (peer.registration_time + Peer::DANGLING_TIMEOUT) <= now) {
            // Check the remotes status to determine if we should set the proof
            // as dangling. This prevents from dropping a proof on our own due
            // to a network issue. If the remote presence status is inconclusive
            // we assume our own position (missing = false).
            if (!getRemotePresenceStatus(peer.getProofId()).value_or(false)) {
                newlyDanglingProofs.push_back(peer.proof);
            }
        }
    }

    // Similarly, check if we have dangling proofs that could be pulled back
    // because the network says so.
    std::vector<ProofRef> previouslyDanglingProofs;
    danglingProofPool.forEachProof([&](const ProofRef &proof) {
        if (getRemotePresenceStatus(proof->getId()).value_or(false)) {
            previouslyDanglingProofs.push_back(proof);
        }
    });
    for (const ProofRef &proof : previouslyDanglingProofs) {
        danglingProofPool.removeProof(proof->getId());
        if (registerProof(proof)) {
            registeredProofs.insert(proof);
        }
    }

    for (const ProofRef &proof : newlyDanglingProofs) {
        rejectProof(proof->getId(), RejectionMode::INVALIDATE);
        // NOTE(review): missing line (source 544) — presumably the dangling
        // pool insertion check that guards this log message.
            // If the proof is added, it means there is no better conflicting
            // dangling proof and this is not a duplicated, so it's worth
            // printing a message to the log.
            // NOTE(review): missing line (source 548) — the LogPrint call
            // prefix (log category).
                     "Proof dangling for too long (no connected node): %s\n",
                     proof->getId().GetHex());
        }
    }

    // If we have dangling proof, this is a good indicator that we need to
    // request more nodes from our peers.
    needMoreNodes = !newlyDanglingProofs.empty();
}
558
560 for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) {
561 const PeerId p = selectPeer();
562
563 // If we cannot find a peer, it may be due to the fact that it is
564 // unlikely due to high fragmentation, so compact and retry.
565 if (p == NO_PEER) {
566 compact();
567 continue;
568 }
569
570 // See if that peer has an available node.
571 auto &nview = nodes.get<next_request_time>();
572 auto it = nview.lower_bound(boost::make_tuple(p, SteadyMilliseconds()));
573 if (it != nview.end() && it->peerid == p &&
574 it->nextRequestTime <= Now<SteadyMilliseconds>()) {
575 return it->nodeid;
576 }
577 }
578
579 // We failed to find a node to query, flag this so we can request more
580 needMoreNodes = true;
581
582 return NO_NODE;
583}
584
// Re-verify every known proof against the new chain tip, invalidating proofs
// that fail and rescanning the immature pool. Returns the proofs that got
// (re-)registered by the rescan.
// NOTE(review): several lines were lost in extraction; gaps flagged below.
std::unordered_set<ProofRef, SaltedProofHasher> PeerManager::updatedBlockTip() {
    std::vector<ProofId> invalidProofIds;
    std::vector<ProofRef> newImmatures;

    {
        LOCK(cs_main);

        for (const auto &p : peers) {
            // NOTE(review): missing line (source 593) — the
            // ProofValidationState declaration used as `state` below.
            if (!p.proof->verify(stakeUtxoDustThreshold, chainman, state)) {
                if (isImmatureState(state)) {
                    newImmatures.push_back(p.proof);
                }
                invalidProofIds.push_back(p.getProofId());

                // NOTE(review): missing line (source 600) — the LogPrint
                // call prefix (log category).
                    "Invalidating proof %s: verification failed (%s)\n",
                    p.proof->getId().GetHex(), state.ToString());
            }
        }

        // Disable thread safety analysis here because it does not play nicely
        // with the lambda
        // NOTE(review): missing line (source 608) — presumably
        // `danglingProofPool.forEachProof(`.
            [&](const ProofRef &proof) NO_THREAD_SAFETY_ANALYSIS {
                // NOTE(review): missing lines (source 610-611) — likely a
                // comment and the ProofValidationState declaration.
                if (!proof->verify(stakeUtxoDustThreshold, chainman, state)) {
                    invalidProofIds.push_back(proof->getId());

                    LogPrint(
                        // NOTE(review): missing line (source 616) — the log
                        // category argument.
                        "Invalidating dangling proof %s: verification failed "
                        "(%s)\n",
                        proof->getId().GetHex(), state.ToString());
                }
            });
    }

    // Remove the invalid proofs before the immature rescan. This makes it
    // possible to pull back proofs with utxos that conflicted with these
    // invalid proofs.
    for (const ProofId &invalidProofId : invalidProofIds) {
        rejectProof(invalidProofId, RejectionMode::INVALIDATE);
    }

    auto registeredProofs = immatureProofPool.rescan(*this);

    for (auto &p : newImmatures) {
        // NOTE(review): missing line (source 634) — presumably the immature
        // pool insertion for `p`.
    }

    return registeredProofs;
}
639
641 ProofRef proof;
642
643 forPeer(proofid, [&](const Peer &p) {
644 proof = p.proof;
645 return true;
646 });
647
648 if (!proof) {
649 proof = conflictingProofPool.getProof(proofid);
650 }
651
652 if (!proof) {
653 proof = immatureProofPool.getProof(proofid);
654 }
655
656 return proof;
657}
658
659bool PeerManager::isBoundToPeer(const ProofId &proofid) const {
660 auto &pview = peers.get<by_proofid>();
661 return pview.find(proofid) != pview.end();
662}
663
664bool PeerManager::isImmature(const ProofId &proofid) const {
665 return immatureProofPool.getProof(proofid) != nullptr;
666}
667
668bool PeerManager::isInConflictingPool(const ProofId &proofid) const {
669 return conflictingProofPool.getProof(proofid) != nullptr;
670}
671
672bool PeerManager::isDangling(const ProofId &proofid) const {
673 return danglingProofPool.getProof(proofid) != nullptr;
674}
675
676void PeerManager::setInvalid(const ProofId &proofid) {
677 invalidProofs.insert(proofid);
678}
679
680bool PeerManager::isInvalid(const ProofId &proofid) const {
681 return invalidProofs.contains(proofid);
682}
683
// NOTE(review): the definition this brace closes (source lines 684-685,
// apparently a small one-line member function) was lost in extraction; only
// its closing brace remains. Restore it from upstream.
}
687
688bool PeerManager::saveRemoteProof(const ProofId &proofid, const NodeId nodeid,
689 const bool present) {
690 if (present && isStakingPreconsensusActivated() && isBoundToPeer(proofid) &&
691 !isRemotelyPresentProof(proofid)) {
692 // If this is the first time this peer's proof becomes a remote proof of
693 // any node, ensure it is included in the contender cache. There is a
694 // special case where the contender cache can lose track of a proof if
695 // it is not saved as a remote proof before the next finalized block
696 // (triggering promotion, where non-remote cache entries are dropped).
697 // This does not happen in the hot path since receiving a proof
698 // immediately saves it as a remote, however it becomes more likely if
699 // the proof was loaded from a file (-persistavapeers) or added via RPC.
700 addStakeContender(getProof(proofid));
701 }
702
703 // Get how many proofs this node has announced
704 auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
705 auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
706
707 // Limit the number of proofs a single node can save:
708 // - At least MAX_REMOTE_PROOFS
709 // - Up to 2x as much as we have
710 // The MAX_REMOTE_PROOFS minimum is there to ensure we don't overlimit at
711 // startup when we don't have proofs yet.
712 while (size_t(std::distance(begin, end)) >=
713 std::max(MAX_REMOTE_PROOFS, 2 * peers.size())) {
714 // Remove the proof with the oldest update time
715 begin = remoteProofsByLastUpdate.erase(begin);
716 }
717
718 auto it = remoteProofs.find(boost::make_tuple(proofid, nodeid));
719 if (it != remoteProofs.end()) {
720 remoteProofs.erase(it);
721 }
722
723 return remoteProofs
724 .emplace(RemoteProof{proofid, nodeid, GetTime<std::chrono::seconds>(),
725 present})
726 .second;
727}
728
729std::vector<RemoteProof>
731 std::vector<RemoteProof> nodeRemoteProofs;
732
733 auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
734 auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
735
736 for (auto &it = begin; it != end; it++) {
737 nodeRemoteProofs.emplace_back(*it);
738 }
739
740 return nodeRemoteProofs;
741}
742
743bool PeerManager::hasRemoteProofStatus(const ProofId &proofid) const {
744 auto &view = remoteProofs.get<by_proofid>();
745 return view.count(proofid) > 0;
746}
747
749 auto &view = remoteProofs.get<by_proofid>();
750 auto [begin, end] = view.equal_range(proofid);
751 return std::any_of(begin, end, [](const auto &remoteProof) {
752 return remoteProof.present;
753 });
754}
755
/**
 * Remove a peer and release everything attached to it: its node refcount and
 * slot allocation, its nodes (re-parked as pending or erased), its proof's
 * UTXO reservations, its radix tree entry and its registered score.
 *
 * @return false if the peer is unknown, true otherwise.
 */
bool PeerManager::removePeer(const PeerId peerid) {
    auto it = peers.find(peerid);
    if (it == peers.end()) {
        return false;
    }

    // Remove all nodes from this peer.
    removeNodeFromPeer(it, it->node_count);

    auto &nview = nodes.get<next_request_time>();

    // Add the nodes to the pending set
    auto range = nview.equal_range(peerid);
    for (auto &nit = range.first; nit != range.second; ++nit) {
        pendingNodes.emplace(it->getProofId(), nit->nodeid);
    };

    // Remove nodes associated with this peer, unless their timeout is still
    // active. This ensure that we don't overquery them in case they are
    // subsequently added to another peer.
    nview.erase(
        nview.lower_bound(boost::make_tuple(peerid, SteadyMilliseconds())),
        nview.upper_bound(
            boost::make_tuple(peerid, Now<SteadyMilliseconds>())));

    // Release UTXOs attached to this proof.
    validProofPool.removeProof(it->getProofId());

    // If there were nodes attached, remove from the radix tree as well
    // NOTE(review): the result is intentionally unused — removal may be a
    // no-op when no node was ever attached (the proof was never inserted).
    auto removed = shareableProofs.remove(Uint256RadixKey(it->getProofId()));

    // Stop tracking this proof for broadcast; it no longer has a peer.
    m_unbroadcast_proofids.erase(it->getProofId());

    // Remove the peer from the PeerSet and remove its score from the registered
    // score total.
    assert(totalPeersScore >= it->getScore());
    totalPeersScore -= it->getScore();
    peers.erase(it);
    return true;
}
796
798 if (slots.empty() || slotCount == 0) {
799 return NO_PEER;
800 }
801
802 const uint64_t max = slotCount;
803 for (int retry = 0; retry < SELECT_PEER_MAX_RETRY; retry++) {
804 size_t i =
805 selectPeerImpl(slots, FastRandomContext().randrange(max), max);
806 if (i != NO_PEER) {
807 return i;
808 }
809 }
810
811 return NO_PEER;
812}
813
815 // There is nothing to compact.
816 if (fragmentation == 0) {
817 return 0;
818 }
819
820 std::vector<Slot> newslots;
821 newslots.reserve(peers.size());
822
823 uint64_t prevStop = 0;
824 uint32_t i = 0;
825 for (auto it = peers.begin(); it != peers.end(); it++) {
826 if (it->node_count == 0) {
827 continue;
828 }
829
830 newslots.emplace_back(prevStop, it->getScore(), it->peerid);
831 prevStop = slots[i].getStop();
832 if (!peers.modify(it, [&](Peer &p) { p.index = i++; })) {
833 return 0;
834 }
835 }
836
837 slots = std::move(newslots);
838
839 const uint64_t saved = slotCount - prevStop;
840 slotCount = prevStop;
841 fragmentation = 0;
842
843 return saved;
844}
845
847 uint64_t prevStop = 0;
848 uint32_t scoreFromSlots = 0;
849 for (size_t i = 0; i < slots.size(); i++) {
850 const Slot &s = slots[i];
851
852 // Slots must be in correct order.
853 if (s.getStart() < prevStop) {
854 return false;
855 }
856
857 prevStop = s.getStop();
858
859 // If this is a dead slot, then nothing more needs to be checked.
860 if (s.getPeerId() == NO_PEER) {
861 continue;
862 }
863
864 // We have a live slot, verify index.
865 auto it = peers.find(s.getPeerId());
866 if (it == peers.end() || it->index != i) {
867 return false;
868 }
869
870 // Accumulate score across slots
871 scoreFromSlots += slots[i].getScore();
872 }
873
874 // Score across slots must be the same as our allocated score
875 if (scoreFromSlots != connectedPeersScore) {
876 return false;
877 }
878
879 uint32_t scoreFromAllPeers = 0;
880 uint32_t scoreFromPeersWithNodes = 0;
881
882 std::unordered_set<COutPoint, SaltedOutpointHasher> peersUtxos;
883 for (const auto &p : peers) {
884 // Accumulate the score across peers to compare with total known score
885 scoreFromAllPeers += p.getScore();
886
887 // A peer should have a proof attached
888 if (!p.proof) {
889 return false;
890 }
891
892 // Check proof pool consistency
893 for (const auto &ss : p.proof->getStakes()) {
894 const COutPoint &outpoint = ss.getStake().getUTXO();
895 auto proof = validProofPool.getProof(outpoint);
896
897 if (!proof) {
898 // Missing utxo
899 return false;
900 }
901 if (proof != p.proof) {
902 // Wrong proof
903 return false;
904 }
905
906 if (!peersUtxos.emplace(outpoint).second) {
907 // Duplicated utxo
908 return false;
909 }
910 }
911
912 // Count node attached to this peer.
913 const auto count_nodes = [&]() {
914 size_t count = 0;
915 auto &nview = nodes.get<next_request_time>();
916 auto begin = nview.lower_bound(
917 boost::make_tuple(p.peerid, SteadyMilliseconds()));
918 auto end = nview.upper_bound(
919 boost::make_tuple(p.peerid + 1, SteadyMilliseconds()));
920
921 for (auto it = begin; it != end; ++it) {
922 count++;
923 }
924
925 return count;
926 };
927
928 if (p.node_count != count_nodes()) {
929 return false;
930 }
931
932 // If there are no nodes attached to this peer, then we are done.
933 if (p.node_count == 0) {
934 continue;
935 }
936
937 scoreFromPeersWithNodes += p.getScore();
938 // The index must point to a slot refering to this peer.
939 if (p.index >= slots.size() || slots[p.index].getPeerId() != p.peerid) {
940 return false;
941 }
942
943 // If the score do not match, same thing.
944 if (slots[p.index].getScore() != p.getScore()) {
945 return false;
946 }
947
948 // Check the proof is in the radix tree only if there are nodes attached
949 if (((localProof && p.getProofId() == localProof->getId()) ||
950 p.node_count > 0) &&
951 shareableProofs.get(p.getProofId()) == nullptr) {
952 return false;
953 }
954 if (p.node_count == 0 &&
955 shareableProofs.get(p.getProofId()) != nullptr) {
956 return false;
957 }
958 }
959
960 // Check our accumulated scores against our registred and allocated scores
961 if (scoreFromAllPeers != totalPeersScore) {
962 return false;
963 }
964 if (scoreFromPeersWithNodes != connectedPeersScore) {
965 return false;
966 }
967
968 // We checked the utxo consistency for all our peers utxos already, so if
969 // the pool size differs from the expected one there are dangling utxos.
970 if (validProofPool.size() != peersUtxos.size()) {
971 return false;
972 }
973
974 // Check there is no dangling proof in the radix tree
976 return isBoundToPeer(pLeaf->getId());
977 });
978}
979
/**
 * Find the peer owning a given slot number.
 *
 * @param slots Slot allocation table, ordered by start offset.
 * @param slot  The slot number that was drawn, in [0, max].
 * @param max   Total allocated slot count (the table's logical end).
 * @return The owning peer id, or NO_PEER if the slot is dead or unallocated.
 */
PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
                      const uint64_t max) {
    assert(slot <= max);

    size_t begin = 0, end = slots.size();
    uint64_t bottom = 0, top = max;

    // Try to find the slot using dichotomic (interpolation) search.
    while ((end - begin) > 8) {
        // The slot we picked is not allocated.
        if (slot < bottom || slot >= top) {
            return NO_PEER;
        }

        // Guesstimate the position of the slot, interpolating linearly
        // within the current [bottom, top) range.
        size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom));
        assert(begin <= i && i < end);

        // We have a match.
        if (slots[i].contains(slot)) {
            return slots[i].getPeerId();
        }

        // We undershot: the target lies after slot i.
        if (slots[i].precedes(slot)) {
            begin = i + 1;
            if (begin >= end) {
                return NO_PEER;
            }

            bottom = slots[begin].getStart();
            continue;
        }

        // We overshot: the target lies before slot i.
        if (slots[i].follows(slot)) {
            end = i;
            top = slots[end].getStart();
            continue;
        }

        // We landed on an unallocated (dead) slot.
        return NO_PEER;
    }

    // Few candidates remain; fall back to linear search.
    for (size_t i = begin; i < end; i++) {
        // We have a match.
        if (slots[i].contains(slot)) {
            return slots[i].getPeerId();
        }
    }

    // We failed to find a slot, retry.
    return NO_PEER;
}
1036
1038 // The proof should be bound to a peer
1039 if (isBoundToPeer(proofid)) {
1040 m_unbroadcast_proofids.insert(proofid);
1041 }
1042}
1043
1045 m_unbroadcast_proofids.erase(proofid);
1046}
1047
// NOTE(review): the opening signature line (presumably
// `bool PeerManager::getStakingRewardWinners(`) was lost in extraction,
// along with four interior lines flagged below. Surviving tokens are
// reproduced verbatim.
                                          const CBlockIndex *pprev,
                                          std::vector<std::pair<ProofId, CScript>> &winners) {
    if (!pprev) {
        return false;
    }

    // Don't select proofs that have not been known for long enough, i.e. at
    // least since twice the dangling proof cleanup timeout before the last
    // block time, so we're sure to not account for proofs more recent than the
    // previous block or lacking node connected.
    // The previous block time is capped to now for the unlikely event the
    // previous block time is in the future.
    auto registrationDelay = std::chrono::duration_cast<std::chrono::seconds>(
        // NOTE(review): missing line (source 1062) — the delay expression.
    auto maxRegistrationDelay =
        std::chrono::duration_cast<std::chrono::seconds>(
            // NOTE(review): missing line (source 1065) — the delay
            // expression.
    auto minRegistrationDelay =
        std::chrono::duration_cast<std::chrono::seconds>(
            // NOTE(review): missing line (source 1068) — the delay
            // expression.

    const int64_t refTime = std::min(pprev->GetBlockTime(), GetTime());

    const int64_t targetRegistrationTime = refTime - registrationDelay.count();
    const int64_t maxRegistrationTime = refTime - minRegistrationDelay.count();
    const int64_t minRegistrationTime = refTime - maxRegistrationDelay.count();

    const BlockHash prevblockhash = pprev->GetBlockHash();

    std::vector<ProofRef> selectedProofs;
    ProofRef firstCompliantProof = ProofRef();
    while (selectedProofs.size() < peers.size()) {
        double bestRewardRank = std::numeric_limits<double>::max();
        ProofRef selectedProof = ProofRef();
        int64_t selectedProofRegistrationTime{0};
        StakeContenderId bestRewardHash;

        for (const Peer &peer : peers) {
            if (!peer.proof) {
                // Should never happen, continue
                continue;
            }

            if (!peer.hasFinalized ||
                peer.registration_time.count() >= maxRegistrationTime) {
                continue;
            }

            if (std::find_if(selectedProofs.begin(), selectedProofs.end(),
                             [&peer](const ProofRef &proof) {
                                 return peer.getProofId() == proof->getId();
                             }) != selectedProofs.end()) {
                continue;
            }

            StakeContenderId proofRewardHash(prevblockhash, peer.getProofId());
            if (proofRewardHash == uint256::ZERO) {
                // This either the result of an incredibly unlikely lucky hash,
                // or a the hash is getting abused. In this case, skip the
                // proof.
                LogPrintf(
                    "Staking reward hash has a suspicious value of zero for "
                    "proof %s and blockhash %s, skipping\n",
                    peer.getProofId().ToString(), prevblockhash.ToString());
                continue;
            }

            double proofRewardRank =
                proofRewardHash.ComputeProofRewardRank(peer.getScore());
            // If selectedProof is nullptr, this means that bestRewardRank is
            // MAX_DOUBLE so the comparison will always select this proof as the
            // preferred one. As a consequence it is safe to use 0 as a proofid.
            // NOTE(review): missing line (source 1121) — presumably the
            // comparator call, e.g. `if (RewardRankComparator()(`.
                    proofRewardHash, proofRewardRank, peer.getProofId(),
                    bestRewardHash, bestRewardRank,
                    selectedProof ? selectedProof->getId()
                                  : ProofId(uint256::ZERO))) {
                bestRewardRank = proofRewardRank;
                selectedProof = peer.proof;
                selectedProofRegistrationTime = peer.registration_time.count();
                bestRewardHash = proofRewardHash;
            }
        }

        if (!selectedProof) {
            // No winner
            break;
        }

        if (!firstCompliantProof &&
            selectedProofRegistrationTime < targetRegistrationTime) {
            firstCompliantProof = selectedProof;
        }

        selectedProofs.push_back(selectedProof);

        if (selectedProofRegistrationTime < minRegistrationTime &&
            !isFlaky(selectedProof->getId())) {
            break;
        }
    }

    winners.clear();

    if (!firstCompliantProof) {
        return false;
    }

    winners.reserve(selectedProofs.size());

    // Find the winner
    for (const ProofRef &proof : selectedProofs) {
        if (proof->getId() == firstCompliantProof->getId()) {
            winners.push_back({proof->getId(), proof->getPayoutScript()});
        }
    }
    // Add the others (if any) after the winner
    for (const ProofRef &proof : selectedProofs) {
        if (proof->getId() != firstCompliantProof->getId()) {
            winners.push_back({proof->getId(), proof->getPayoutScript()});
        }
    }

    return true;
}
1174
1175bool PeerManager::setFlaky(const ProofId &proofid) {
1176 return manualFlakyProofids.insert(proofid).second;
1177}
1178
1179bool PeerManager::unsetFlaky(const ProofId &proofid) {
1180 return manualFlakyProofids.erase(proofid) > 0;
1181}
1182
1183bool PeerManager::isFlaky(const ProofId &proofid) const {
1184 if (localProof && proofid == localProof->getId()) {
1185 return false;
1186 }
1187
1188 if (manualFlakyProofids.count(proofid) > 0) {
1189 return true;
1190 }
1191
1192 // If we are missing connection to this proof, consider flaky
1193 if (forPeer(proofid,
1194 [](const Peer &peer) { return peer.node_count == 0; })) {
1195 return true;
1196 }
1197
1198 auto &remoteProofsByNodeId = remoteProofs.get<by_nodeid>();
1199 auto &nview = nodes.get<next_request_time>();
1200
1201 std::unordered_map<PeerId, std::unordered_set<ProofId, SaltedProofIdHasher>>
1202 missing_per_peer;
1203
1204 // Construct a set of missing proof ids per peer
1205 double total_score{0};
1206 for (const Peer &peer : peers) {
1207 const PeerId peerid = peer.peerid;
1208
1209 total_score += peer.getScore();
1210
1211 auto nodes_range = nview.equal_range(peerid);
1212 for (auto &nit = nodes_range.first; nit != nodes_range.second; ++nit) {
1213 auto proofs_range = remoteProofsByNodeId.equal_range(nit->nodeid);
1214 for (auto &proofit = proofs_range.first;
1215 proofit != proofs_range.second; ++proofit) {
1216 if (!proofit->present) {
1217 missing_per_peer[peerid].insert(proofit->proofid);
1218 }
1219 }
1220 };
1221 }
1222
1223 double missing_score{0};
1224
1225 // Now compute a score for the missing proof
1226 for (const auto &[peerid, missingProofs] : missing_per_peer) {
1227 if (missingProofs.size() > 3) {
1228 // Ignore peers with too many missing proofs
1229 continue;
1230 }
1231
1232 auto pit = peers.find(peerid);
1233 if (pit == peers.end()) {
1234 // Peer not found
1235 continue;
1236 }
1237
1238 if (missingProofs.count(proofid) > 0) {
1239 missing_score += pit->getScore();
1240 }
1241 }
1242
1243 return (missing_score / total_score) > 0.3;
1244}
1245
1246std::optional<bool>
1248 auto &remoteProofsView = remoteProofs.get<by_proofid>();
1249 auto [begin, end] = remoteProofsView.equal_range(proofid);
1250
1251 if (begin == end) {
1252 // No remote registered anything yet, we are on our own
1253 return std::nullopt;
1254 }
1255
1256 double total_score{0};
1257 double present_score{0};
1258 double missing_score{0};
1259
1260 for (auto it = begin; it != end; it++) {
1261 auto nit = nodes.find(it->nodeid);
1262 if (nit == nodes.end()) {
1263 // No such node
1264 continue;
1265 }
1266
1267 const PeerId peerid = nit->peerid;
1268
1269 auto pit = peers.find(peerid);
1270 if (pit == peers.end()) {
1271 // Peer not found
1272 continue;
1273 }
1274
1275 uint32_t node_count = pit->node_count;
1276 if (localProof && pit->getProofId() == localProof->getId()) {
1277 // If that's our local proof, account for ourself
1278 ++node_count;
1279 }
1280
1281 if (node_count == 0) {
1282 // should never happen
1283 continue;
1284 }
1285
1286 const double score = double(pit->getScore()) / node_count;
1287
1288 total_score += score;
1289 if (it->present) {
1290 present_score += score;
1291 } else {
1292 missing_score += score;
1293 }
1294 }
1295
1296 if (localProof) {
1297 auto &peersByProofid = peers.get<by_proofid>();
1298
1299 // Do we have a node connected for that proof ?
1300 bool present = false;
1301 auto pit = peersByProofid.find(proofid);
1302 if (pit != peersByProofid.end()) {
1303 present = pit->node_count > 0;
1304 }
1305
1306 pit = peersByProofid.find(localProof->getId());
1307 if (pit != peersByProofid.end()) {
1308 // Also divide by node_count, we can have several nodes even for our
1309 // local proof.
1310 const double score =
1311 double(pit->getScore()) / (1 + pit->node_count);
1312
1313 total_score += score;
1314 if (present) {
1315 present_score += score;
1316 } else {
1317 missing_score += score;
1318 }
1319 }
1320 }
1321
1322 if (present_score / total_score > 0.55) {
1323 return std::make_optional(true);
1324 }
1325
1326 if (missing_score / total_score > 0.55) {
1327 return std::make_optional(false);
1328 }
1329
1330 return std::nullopt;
1331}
1332
1333bool PeerManager::dumpPeersToFile(const fs::path &dumpPath) const {
1334 try {
1335 const fs::path dumpPathTmp = dumpPath + ".new";
1336 FILE *filestr = fsbridge::fopen(dumpPathTmp, "wb");
1337 if (!filestr) {
1338 return false;
1339 }
1340
1341 AutoFile file{filestr};
1342 file << PEERS_DUMP_VERSION;
1343 file << uint64_t(peers.size());
1344 for (const Peer &peer : peers) {
1345 file << peer.proof;
1346 file << peer.hasFinalized;
1347 file << int64_t(peer.registration_time.count());
1348 file << int64_t(peer.nextPossibleConflictTime.count());
1349 }
1350
1351 if (!FileCommit(file.Get())) {
1352 throw std::runtime_error(strprintf("Failed to commit to file %s",
1353 PathToString(dumpPathTmp)));
1354 }
1355 file.fclose();
1356
1357 if (!RenameOver(dumpPathTmp, dumpPath)) {
1358 throw std::runtime_error(strprintf("Rename failed from %s to %s",
1359 PathToString(dumpPathTmp),
1360 PathToString(dumpPath)));
1361 }
1362 } catch (const std::exception &e) {
1363 LogPrint(BCLog::AVALANCHE, "Failed to dump the avalanche peers: %s.\n",
1364 e.what());
1365 return false;
1366 }
1367
1368 LogPrint(BCLog::AVALANCHE, "Successfully dumped %d peers to %s.\n",
1369 peers.size(), PathToString(dumpPath));
1370
1371 return true;
1372}
1373
1375 const fs::path &dumpPath,
1376 std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
1377 registeredProofs.clear();
1378
1379 FILE *filestr = fsbridge::fopen(dumpPath, "rb");
1380 AutoFile file{filestr};
1381 if (file.IsNull()) {
1383 "Failed to open avalanche peers file from disk.\n");
1384 return false;
1385 }
1386
1387 try {
1388 uint64_t version;
1389 file >> version;
1390
1391 if (version != PEERS_DUMP_VERSION) {
1393 "Unsupported avalanche peers file version.\n");
1394 return false;
1395 }
1396
1397 uint64_t numPeers;
1398 file >> numPeers;
1399
1400 auto &peersByProofId = peers.get<by_proofid>();
1401
1402 for (uint64_t i = 0; i < numPeers; i++) {
1403 ProofRef proof;
1404 bool hasFinalized;
1405 int64_t registrationTime;
1406 int64_t nextPossibleConflictTime;
1407
1408 file >> proof;
1409 file >> hasFinalized;
1410 file >> registrationTime;
1411 file >> nextPossibleConflictTime;
1412
1413 if (registerProof(proof)) {
1414 auto it = peersByProofId.find(proof->getId());
1415 if (it == peersByProofId.end()) {
1416 // Should never happen
1417 continue;
1418 }
1419
1420 // We don't modify any key so we don't need to rehash.
1421 // If the modify fails, it means we don't get the full benefit
1422 // from the file but we still added our peer to the set. The
1423 // non-overridden fields will be set the normal way.
1424 peersByProofId.modify(it, [&](Peer &p) {
1425 p.hasFinalized = hasFinalized;
1427 std::chrono::seconds{registrationTime};
1429 std::chrono::seconds{nextPossibleConflictTime};
1430 });
1431
1432 registeredProofs.insert(proof);
1433 }
1434 }
1435 } catch (const std::exception &e) {
1437 "Failed to read the avalanche peers file data on disk: %s.\n",
1438 e.what());
1439 return false;
1440 }
1441
1442 return true;
1443}
1444
1445void PeerManager::cleanupStakeContenders(const int requestedMinHeight) {
1446 stakeContenderCache.cleanup(requestedMinHeight);
1447}
1448
1450 const CBlockIndex *tip = WITH_LOCK(cs_main, return chainman.ActiveTip());
1451 stakeContenderCache.add(tip, proof);
1452
1453 const BlockHash blockhash = tip->GetBlockHash();
1454 const ProofId &proofid = proof->getId();
1456 "Cached stake contender with proofid %s, payout %s at block "
1457 "%s (height %d) with id %s\n",
1458 proofid.ToString(), HexStr(proof->getPayoutScript()),
1459 blockhash.ToString(), tip->nHeight,
1460 StakeContenderId(blockhash, proofid).ToString());
1461}
1462
1464 BlockHash &prevblockhashout) const {
1465 return stakeContenderCache.getVoteStatus(contenderId, prevblockhashout);
1466}
1467
1469 stakeContenderCache.accept(contenderId);
1470}
1471
1473 const StakeContenderId &contenderId, BlockHash &prevblockhash,
1474 std::vector<std::pair<ProofId, CScript>> &newWinners) {
1475 stakeContenderCache.finalize(contenderId);
1476
1477 // Get block hash related to this contender. We should not assume the
1478 // current chain tip is the block this contender is a winner for.
1479 getStakeContenderStatus(contenderId, prevblockhash);
1480
1481 // Calculate the new winners for this block
1482 stakeContenderCache.getWinners(prevblockhash, newWinners);
1483}
1484
1486 stakeContenderCache.reject(contenderId);
1487}
1488
1490 stakeContenderCache.promoteToBlock(pindex, [&](const ProofId &proofid) {
1491 return isBoundToPeer(proofid) ||
1492 // isDangling check appears redundant, but remote proofs are not
1493 // guaranteed to be cleaned up when one of our peers is removed
1494 // for dangling too long. Whether or not a proof is dangling is
1495 // gated by remote presence status, so only proofs that are very
1496 // poorly connected to the network will stop being promoted.
1497 (isRemotelyPresentProof(proofid) && isDangling(proofid));
1498 });
1499}
1500
1502 const CBlockIndex *prevblock,
1503 const std::vector<std::pair<ProofId, CScript>> winners, size_t maxPollable,
1504 std::vector<StakeContenderId> &pollableContenders) {
1505 const BlockHash prevblockhash = prevblock->GetBlockHash();
1506 // Set status for local winners
1507 for (const auto &winner : winners) {
1508 const StakeContenderId contenderId(prevblockhash, winner.first);
1509 stakeContenderCache.finalize(contenderId);
1511 "Stake contender set as local winner: proofid %s, payout "
1512 "%s at block %s (height %d) with id %s\n",
1513 winner.first.ToString(), HexStr(winner.second),
1514 prevblockhash.ToString(), prevblock->nHeight,
1515 contenderId.ToString());
1516 }
1517
1518 // Treat the highest ranking contender similarly to local winners except
1519 // that it is not automatically included in the winner set (unless it
1520 // happens to be selected as a local winner).
1521 if (stakeContenderCache.getPollableContenders(prevblockhash, maxPollable,
1522 pollableContenders) > 0) {
1523 // Accept the highest ranking contender. This is a no-op if the highest
1524 // ranking contender is already the local winner.
1525 stakeContenderCache.accept(pollableContenders[0]);
1527 "Stake contender set as best contender: id %s at block "
1528 "%s (height %d)\n",
1529 pollableContenders[0].ToString(), prevblockhash.ToString(),
1530 prevblock->nHeight);
1531 return true;
1532 }
1533
1534 return false;
1535}
1536
1538 const CBlockIndex *pindex, const std::vector<CScript> &payoutScripts) {
1539 return stakeContenderCache.setWinners(pindex, payoutScripts);
1540}
1541
1542} // namespace avalanche
ArgsManager gArgs
Definition: args.cpp:39
static constexpr PeerId NO_PEER
Definition: node.h:16
uint32_t PeerId
Definition: node.h:15
static constexpr size_t AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN
Conflicting proofs cooldown time default value in seconds.
Definition: avalanche.h:21
int64_t GetIntArg(const std::string &strArg, int64_t nDefault) const
Return integer argument or default value.
Definition: args.cpp:494
Non-refcounted RAII wrapper for FILE*.
Definition: streams.h:430
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition: blockindex.h:25
int64_t GetBlockTime() const
Definition: blockindex.h:160
BlockHash GetBlockHash() const
Definition: blockindex.h:130
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: blockindex.h:38
void insert(Span< const uint8_t > vKey)
Definition: bloom.cpp:215
bool contains(Span< const uint8_t > vKey) const
Definition: bloom.cpp:249
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Definition: validation.h:1443
Fast randomness source.
Definition: random.h:411
bool Invalid(Result result, const std::string &reject_reason="", const std::string &debug_message="")
Definition: validation.h:101
Result GetResult() const
Definition: validation.h:122
std::string ToString() const
Definition: validation.h:125
bool selectStakingRewardWinner(const CBlockIndex *pprev, std::vector< std::pair< ProofId, CScript > > &winners)
Deterministically select a list of payout scripts based on the proof set and the previous block hash.
uint32_t connectedPeersScore
Definition: peermanager.h:239
std::vector< RemoteProof > getRemoteProofs(const NodeId nodeid) const
bool removeNode(NodeId nodeid)
bool setFinalized(PeerId peerid)
Latch on that this peer has a finalized proof.
bool dumpPeersToFile(const fs::path &dumpPath) const
RemoteProofSet remoteProofs
Remember which node sent which proof so we have an image of the proof set of our peers.
Definition: peermanager.h:283
bool updateNextRequestTimeForResponse(NodeId nodeid, const Response &response)
bool isDangling(const ProofId &proofid) const
bool unsetFlaky(const ProofId &proofid)
std::optional< bool > getRemotePresenceStatus(const ProofId &proofid) const
Get the presence remote status of a proof.
bool addNodeToPeer(const PeerSet::iterator &it)
Definition: peermanager.cpp:89
bool exists(const ProofId &proofid) const
Return true if the (valid) proof exists, but only for non-dangling proofs.
Definition: peermanager.h:414
PendingNodeSet pendingNodes
Definition: peermanager.h:225
bool verify() const
Perform consistency check on internal data structures.
bool hasRemoteProofStatus(const ProofId &proofid) const
bool forPeer(const ProofId &proofid, Callable &&func) const
Definition: peermanager.h:422
void finalizeStakeContender(const StakeContenderId &contenderId, BlockHash &prevblockhash, std::vector< std::pair< ProofId, CScript > > &newWinners)
bool latchAvaproofsSent(NodeId nodeid)
Flag that a node did send its compact proofs.
void cleanupStakeContenders(const int requestedMinHeight)
Make some of the contender cache API available.
bool addNode(NodeId nodeid, const ProofId &proofid)
Node API.
Definition: peermanager.cpp:33
bool updateNextRequestTimeForPoll(NodeId nodeid, SteadyMilliseconds timeout, uint64_t round)
static constexpr int SELECT_PEER_MAX_RETRY
Definition: peermanager.h:227
ProofIdSet m_unbroadcast_proofids
Track proof ids to broadcast.
Definition: peermanager.h:233
bool loadPeersFromFile(const fs::path &dumpPath, std::unordered_set< ProofRef, SaltedProofHasher > &registeredProofs)
RejectionMode
Rejection mode.
Definition: peermanager.h:402
void addUnbroadcastProof(const ProofId &proofid)
Proof broadcast API.
std::unordered_set< ProofRef, SaltedProofHasher > updatedBlockTip()
Update the peer set when a new block is connected.
void removeUnbroadcastProof(const ProofId &proofid)
void promoteStakeContendersToBlock(const CBlockIndex *pindex)
bool isBoundToPeer(const ProofId &proofid) const
bool setContenderStatusForLocalWinners(const CBlockIndex *prevblock, const std::vector< std::pair< ProofId, CScript > > winners, size_t maxPollable, std::vector< StakeContenderId > &pollableContenders)
ProofRadixTree shareableProofs
Definition: peermanager.h:191
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
CRollingBloomFilter invalidProofs
Filter for proofs that are consensus-invalid or were recently invalidated by avalanche (finalized rej...
Definition: peermanager.h:297
uint64_t compact()
Trigger maintenance of internal data structures.
std::vector< Slot > slots
Definition: peermanager.h:163
uint32_t totalPeersScore
Quorum management.
Definition: peermanager.h:238
ProofPool danglingProofPool
Definition: peermanager.h:188
StakeContenderCache stakeContenderCache
Definition: peermanager.h:301
void setInvalid(const ProofId &proofid)
int getStakeContenderStatus(const StakeContenderId &contenderId, BlockHash &prevblockhashout) const
bool isFlaky(const ProofId &proofid) const
ChainstateManager & chainman
Definition: peermanager.h:243
bool isInvalid(const ProofId &proofid) const
std::unordered_set< ProofId, SaltedProofIdHasher > manualFlakyProofids
Definition: peermanager.h:299
bool removePeer(const PeerId peerid)
Remove an existing peer.
bool isImmature(const ProofId &proofid) const
bool addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid)
Definition: peermanager.cpp:48
bool rejectProof(const ProofId &proofid, RejectionMode mode=RejectionMode::DEFAULT)
ProofPool immatureProofPool
Definition: peermanager.h:187
RegistrationMode
Registration mode.
Definition: peermanager.h:379
ProofPool conflictingProofPool
Definition: peermanager.h:186
bool isStakingPreconsensusActivated() const
Definition: peermanager.h:569
static constexpr size_t MAX_REMOTE_PROOFS
Definition: peermanager.h:304
bool setFlaky(const ProofId &proofid)
void addStakeContender(const ProofRef &proof)
std::atomic< bool > needMoreNodes
Flag indicating that we failed to select a node and need to expand our node set.
Definition: peermanager.h:211
PeerId selectPeer() const
Randomly select a peer to poll.
bool isInConflictingPool(const ProofId &proofid) const
bool isRemotelyPresentProof(const ProofId &proofid) const
static constexpr int SELECT_NODE_MAX_RETRY
Definition: peermanager.h:228
void cleanupDanglingProofs(std::unordered_set< ProofRef, SaltedProofHasher > &registeredProofs)
void acceptStakeContender(const StakeContenderId &contenderId)
ProofRef getProof(const ProofId &proofid) const
bool registerProof(const ProofRef &proof, ProofRegistrationState &registrationState, RegistrationMode mode=RegistrationMode::DEFAULT)
void rejectStakeContender(const StakeContenderId &contenderId)
bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count=1)
bool updateNextPossibleConflictTime(PeerId peerid, const std::chrono::seconds &nextTime)
Proof and Peer related API.
void moveToConflictingPool(const ProofContainer &proofs)
bool setStakeContenderWinners(const CBlockIndex *pindex, const std::vector< CScript > &payoutScripts)
AddProofStatus addProofIfPreferred(const ProofRef &proof, ConflictingProofSet &conflictingProofs)
Attempt to add a proof to the pool.
Definition: proofpool.cpp:54
size_t size() const
Definition: proofpool.h:135
AddProofStatus addProofIfNoConflict(const ProofRef &proof, ConflictingProofSet &conflictingProofs)
Attempt to add a proof to the pool, and fail if there is a conflict on any UTXO.
Definition: proofpool.cpp:13
size_t countProofs() const
Definition: proofpool.cpp:129
bool removeProof(ProofId proofid)
Definition: proofpool.cpp:79
void forEachProof(Callable &&func) const
Definition: proofpool.h:118
ProofRef getProof(const ProofId &proofid) const
Definition: proofpool.cpp:112
std::set< ProofRef, ConflictingProofComparator > ConflictingProofSet
Definition: proofpool.h:88
ProofRef getLowestScoreProof() const
Definition: proofpool.cpp:123
std::unordered_set< ProofRef, SaltedProofHasher > rescan(PeerManager &peerManager)
Definition: proofpool.cpp:86
bool getWinners(const BlockHash &prevblockhash, std::vector< std::pair< ProofId, CScript > > &winners) const
bool accept(const StakeContenderId &contenderId)
Helpers to set avalanche state of a contender.
void cleanup(const int requestedMinHeight)
size_t getPollableContenders(const BlockHash &prevblockhash, size_t maxPollable, std::vector< StakeContenderId > &pollableContenders) const
Get the best ranking contenders, accepted contenders ranking first.
bool reject(const StakeContenderId &contenderId)
bool setWinners(const CBlockIndex *pindex, const std::vector< CScript > &payoutScripts)
Set proof(s) that should be treated as winners (already finalized).
bool add(const CBlockIndex *pindex, const ProofRef &proof, uint8_t status=StakeContenderStatus::UNKNOWN)
Add a proof to consider in staking rewards pre-consensus.
void promoteToBlock(const CBlockIndex *activeTip, std::function< bool(const ProofId &proofid)> const &shouldPromote)
Promote cache entries to the active chain tip.
int getVoteStatus(const StakeContenderId &contenderId, BlockHash &prevblockhashout) const
Get contender acceptance state for avalanche voting.
bool finalize(const StakeContenderId &contenderId)
std::string ToString() const
Definition: uint256.h:80
Path class wrapper to block calls to the fs::path(std::string) implicit constructor and the fs::path:...
Definition: fs.h:30
static const uint256 ZERO
Definition: uint256.h:134
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition: cs_main.cpp:7
int64_t NodeId
Definition: eviction.h:16
bool RenameOver(fs::path src, fs::path dest)
Rename src to dest.
Definition: fs_helpers.cpp:258
bool FileCommit(FILE *file)
Ensure file contents are fully committed to disk, using a platform-specific feature analogous to fsyn...
Definition: fs_helpers.cpp:111
std::string HexStr(const Span< const uint8_t > s)
Convert a span of bytes to a lower-case hexadecimal string.
Definition: hex_base.cpp:30
#define LogPrint(category,...)
Definition: logging.h:452
#define LogTrace(category,...)
Definition: logging.h:448
#define LogPrintf(...)
Definition: logging.h:424
@ AVALANCHE
Definition: logging.h:91
ProofRegistrationResult
Definition: peermanager.h:145
static constexpr uint32_t AVALANCHE_MAX_IMMATURE_PROOFS
Maximum number of immature proofs the peer manager will accept from the network.
Definition: peermanager.h:46
static bool isImmatureState(const ProofValidationState &state)
static constexpr uint64_t PEERS_DUMP_VERSION
Definition: peermanager.cpp:31
PeerId selectPeerImpl(const std::vector< Slot > &slots, const uint64_t slot, const uint64_t max)
Internal methods that are exposed for testing purposes.
RCUPtr< const Proof > ProofRef
Definition: proof.h:186
static std::string PathToString(const path &path)
Convert path object to byte string.
Definition: fs.h:147
FILE * fopen(const fs::path &p, const char *mode)
Definition: fs.cpp:30
static constexpr NodeId NO_NODE
Special NodeId that represent no node.
Definition: nodeid.h:15
Response response
Definition: processor.cpp:522
static std::string ToString(const CService &ip)
Definition: db.h:36
A BlockHash is a unique identifier for a block.
Definition: blockhash.h:13
RCUPtr< T > remove(const KeyType &key)
Remove an element from the tree.
Definition: radix.h:181
RCUPtr< T > get(const KeyType &key)
Get the value corresponding to a key.
Definition: radix.h:118
bool forEachLeaf(Callable &&func) const
Definition: radix.h:144
bool insert(const RCUPtr< T > &value)
Insert a value into the tree.
Definition: radix.h:112
Facility for using an uint256 as a radix tree key.
uint64_t last_round
Definition: node.h:25
SteadyMilliseconds nextRequestTime
Definition: node.h:23
bool avaproofsSent
Definition: node.h:24
std::chrono::seconds registration_time
Definition: peermanager.h:95
std::chrono::seconds nextPossibleConflictTime
Definition: peermanager.h:96
uint32_t node_count
Definition: peermanager.h:89
static constexpr auto DANGLING_TIMEOUT
Consider dropping the peer if no node is attached after this timeout expired.
Definition: peermanager.h:102
uint32_t index
Definition: peermanager.h:88
uint32_t getScore() const
Definition: peermanager.h:111
ProofRef proof
Definition: peermanager.h:91
uint64_t getStop() const
Definition: peermanager.h:75
uint64_t getStart() const
Definition: peermanager.h:74
PeerId getPeerId() const
Definition: peermanager.h:77
StakeContenderIds are unique for each block to ensure that the peer polling for their acceptance has ...
double ComputeProofRewardRank(uint32_t proofScore) const
To make sure the selection is properly weighted according to the proof score, we normalize the conten...
#define LOCK(cs)
Definition: sync.h:306
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:357
static int count
#define NO_THREAD_SAFETY_ANALYSIS
Definition: threadsafety.h:58
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
Definition: time.cpp:62
std::chrono::time_point< std::chrono::steady_clock, std::chrono::milliseconds > SteadyMilliseconds
Definition: time.h:33
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1202
AssertLockHeld(pool.cs)
assert(!tx.IsCoinBase())