Bitcoin ABC 0.30.9
P2P Digital Currency
peermanager.cpp
Go to the documentation of this file.
1// Copyright (c) 2020 The Bitcoin developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
6
12#include <cashaddrenc.h>
13#include <common/args.h>
15#include <logging.h>
16#include <random.h>
17#include <scheduler.h>
18#include <threadsafety.h>
19#include <uint256.h>
20#include <util/fastrange.h>
21#include <util/fs_helpers.h>
22#include <util/time.h>
23#include <validation.h> // For ChainstateManager
24
25#include <algorithm>
26#include <cassert>
27#include <limits>
28
29namespace avalanche {
30static constexpr uint64_t PEERS_DUMP_VERSION{1};
31
32bool PeerManager::addNode(NodeId nodeid, const ProofId &proofid) {
33 auto &pview = peers.get<by_proofid>();
34 auto it = pview.find(proofid);
35 if (it == pview.end()) {
36 // If the node exists, it is actually updating its proof to an unknown
37 // one. In this case we need to remove it so it is not both active and
38 // pending at the same time.
39 removeNode(nodeid);
40 pendingNodes.emplace(proofid, nodeid);
41 return false;
42 }
43
44 return addOrUpdateNode(peers.project<0>(it), nodeid);
45}
46
47bool PeerManager::addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid) {
48 assert(it != peers.end());
49
50 const PeerId peerid = it->peerid;
51
52 auto nit = nodes.find(nodeid);
53 if (nit == nodes.end()) {
54 if (!nodes.emplace(nodeid, peerid).second) {
55 return false;
56 }
57 } else {
58 const PeerId oldpeerid = nit->peerid;
59 if (!nodes.modify(nit, [&](Node &n) { n.peerid = peerid; })) {
60 return false;
61 }
62
63 // We actually have this node already, we need to update it.
64 bool success = removeNodeFromPeer(peers.find(oldpeerid));
65 assert(success);
66 }
67
68 // Then increase the node counter, and create the slot if needed
69 bool success = addNodeToPeer(it);
70 assert(success);
71
72 // If the added node was in the pending set, remove it
73 pendingNodes.get<by_nodeid>().erase(nodeid);
74
75 // If the proof was in the dangling pool, remove it
76 const ProofId &proofid = it->getProofId();
77 if (danglingProofPool.getProof(proofid)) {
79 }
80
81 // We know for sure there is at least 1 node. Note that this can fail if
82 // there is more than 1, in this case it's a no-op.
83 shareableProofs.insert(it->proof);
84
85 return true;
86}
87
88bool PeerManager::addNodeToPeer(const PeerSet::iterator &it) {
89 assert(it != peers.end());
90 return peers.modify(it, [&](Peer &p) {
91 if (p.node_count++ > 0) {
92 // We are done.
93 return;
94 }
95
96 // We need to allocate this peer.
97 p.index = uint32_t(slots.size());
98 const uint32_t score = p.getScore();
99 const uint64_t start = slotCount;
100 slots.emplace_back(start, score, it->peerid);
101 slotCount = start + score;
102
103 // Add to our allocated score when we allocate a new peer in the slots
104 connectedPeersScore += score;
105 });
106}
107
109 // Remove all the remote proofs from this node
110 auto &remoteProofsView = remoteProofs.get<by_nodeid>();
111 auto [begin, end] = remoteProofsView.equal_range(nodeid);
112 remoteProofsView.erase(begin, end);
113
114 if (pendingNodes.get<by_nodeid>().erase(nodeid) > 0) {
115 // If this was a pending node, there is nothing else to do.
116 return true;
117 }
118
119 auto it = nodes.find(nodeid);
120 if (it == nodes.end()) {
121 return false;
122 }
123
124 const PeerId peerid = it->peerid;
125 nodes.erase(it);
126
127 // Keep the track of the reference count.
128 bool success = removeNodeFromPeer(peers.find(peerid));
129 assert(success);
130
131 return true;
132}
133
134bool PeerManager::removeNodeFromPeer(const PeerSet::iterator &it,
135 uint32_t count) {
136 // It is possible for nodes to be dangling. If there was an inflight query
137 // when the peer gets removed, the node was not erased. In this case there
138 // is nothing to do.
139 if (it == peers.end()) {
140 return true;
141 }
142
143 assert(count <= it->node_count);
144 if (count == 0) {
145 // This is a NOOP.
146 return false;
147 }
148
149 const uint32_t new_count = it->node_count - count;
150 if (!peers.modify(it, [&](Peer &p) { p.node_count = new_count; })) {
151 return false;
152 }
153
154 if (new_count > 0) {
155 // We are done.
156 return true;
157 }
158
159 // There are no more nodes left, we need to clean up. Remove from the radix
160 // tree (unless it's our local proof), subtract allocated score and remove
161 // from slots.
162 if (!localProof || it->getProofId() != localProof->getId()) {
163 const auto removed = shareableProofs.remove(it->getProofId());
164 assert(removed);
165 }
166
167 const size_t i = it->index;
168 assert(i < slots.size());
169 assert(connectedPeersScore >= slots[i].getScore());
170 connectedPeersScore -= slots[i].getScore();
171
172 if (i + 1 == slots.size()) {
173 slots.pop_back();
174 slotCount = slots.empty() ? 0 : slots.back().getStop();
175 } else {
176 fragmentation += slots[i].getScore();
177 slots[i] = slots[i].withPeerId(NO_PEER);
178 }
179
180 return true;
181}
182
184 SteadyMilliseconds timeout) {
185 auto it = nodes.find(nodeid);
186 if (it == nodes.end()) {
187 return false;
188 }
189
190 return nodes.modify(it, [&](Node &n) { n.nextRequestTime = timeout; });
191}
192
194 auto it = nodes.find(nodeid);
195 if (it == nodes.end()) {
196 return false;
197 }
198
199 return !it->avaproofsSent &&
200 nodes.modify(it, [&](Node &n) { n.avaproofsSent = true; });
201}
202
203static bool isImmatureState(const ProofValidationState &state) {
205}
206
208 PeerId peerid, const std::chrono::seconds &nextTime) {
209 auto it = peers.find(peerid);
210 if (it == peers.end()) {
211 // No such peer
212 return false;
213 }
214
215 // Make sure we don't move the time in the past.
216 peers.modify(it, [&](Peer &p) {
218 std::max(p.nextPossibleConflictTime, nextTime);
219 });
220
221 return it->nextPossibleConflictTime == nextTime;
222}
223
225 auto it = peers.find(peerid);
226 if (it == peers.end()) {
227 // No such peer
228 return false;
229 }
230
231 peers.modify(it, [&](Peer &p) { p.hasFinalized = true; });
232
233 return true;
234}
235
236template <typename ProofContainer>
237void PeerManager::moveToConflictingPool(const ProofContainer &proofs) {
238 auto &peersView = peers.get<by_proofid>();
239 for (const ProofRef &proof : proofs) {
240 auto it = peersView.find(proof->getId());
241 if (it != peersView.end()) {
242 removePeer(it->peerid);
243 }
244
246 }
247}
248
250 ProofRegistrationState &registrationState,
251 RegistrationMode mode) {
252 assert(proof);
253
254 const ProofId &proofid = proof->getId();
255
256 auto invalidate = [&](ProofRegistrationResult result,
257 const std::string &message) {
258 return registrationState.Invalid(
259 result, message, strprintf("proofid: %s", proofid.ToString()));
260 };
261
262 if ((mode != RegistrationMode::FORCE_ACCEPT ||
263 !isInConflictingPool(proofid)) &&
264 exists(proofid)) {
265 // In default mode, we expect the proof to be unknown, i.e. in none of
266 // the pools.
267 // In forced accept mode, the proof can be in the conflicting pool.
269 "proof-already-registered");
270 }
271
272 if (danglingProofPool.getProof(proofid) &&
273 pendingNodes.count(proofid) == 0) {
274 // Don't attempt to register a proof that we already evicted because it
275 // was dangling, but rather attempt to retrieve an associated node.
276 needMoreNodes = true;
277 return invalidate(ProofRegistrationResult::DANGLING, "dangling-proof");
278 }
279
280 // Check the proof's validity.
281 ProofValidationState validationState;
282 if (!WITH_LOCK(cs_main, return proof->verify(stakeUtxoDustThreshold,
283 chainman, validationState))) {
284 if (isImmatureState(validationState)) {
288 // Adding this proof exceeds the immature pool limit, so evict
289 // the lowest scoring proof.
292 }
293
294 return invalidate(ProofRegistrationResult::IMMATURE,
295 "immature-proof");
296 }
297
298 if (validationState.GetResult() ==
301 "utxo-missing-or-spent");
302 }
303
304 // Reject invalid proof.
305 return invalidate(ProofRegistrationResult::INVALID, "invalid-proof");
306 }
307
308 auto now = GetTime<std::chrono::seconds>();
309 auto nextCooldownTimePoint =
310 now + std::chrono::seconds(gArgs.GetIntArg(
311 "-avalancheconflictingproofcooldown",
313
314 ProofPool::ConflictingProofSet conflictingProofs;
315 switch (validProofPool.addProofIfNoConflict(proof, conflictingProofs)) {
316 case ProofPool::AddProofStatus::REJECTED: {
317 if (mode != RegistrationMode::FORCE_ACCEPT) {
318 auto bestPossibleConflictTime = std::chrono::seconds(0);
319 auto &pview = peers.get<by_proofid>();
320 for (auto &conflictingProof : conflictingProofs) {
321 auto it = pview.find(conflictingProof->getId());
322 assert(it != pview.end());
323
324 // Search the most recent time over the peers
325 bestPossibleConflictTime = std::max(
326 bestPossibleConflictTime, it->nextPossibleConflictTime);
327
329 nextCooldownTimePoint);
330 }
331
332 if (bestPossibleConflictTime > now) {
333 // Cooldown not elapsed, reject the proof.
334 return invalidate(
336 "cooldown-not-elapsed");
337 }
338
339 // Give the proof a chance to replace the conflicting ones.
341 // If we have overridden other proofs due to conflict,
342 // remove the peers and attempt to move them to the
343 // conflicting pool.
344 moveToConflictingPool(conflictingProofs);
345
346 // Replacement is successful, continue to peer creation
347 break;
348 }
349
350 // Not the preferred proof, or replacement is not enabled
352 ProofPool::AddProofStatus::REJECTED
354 "rejected-proof")
356 "conflicting-utxos");
357 }
358
360
361 // Move the conflicting proofs from the valid pool to the
362 // conflicting pool
363 moveToConflictingPool(conflictingProofs);
364
365 auto status = validProofPool.addProofIfNoConflict(proof);
366 assert(status == ProofPool::AddProofStatus::SUCCEED);
367
368 break;
369 }
370 case ProofPool::AddProofStatus::DUPLICATED:
371 // If the proof was already in the pool, don't duplicate the peer.
373 "proof-already-registered");
374 case ProofPool::AddProofStatus::SUCCEED:
375 break;
376
377 // No default case, so the compiler can warn about missing cases
378 }
379
380 // At this stage we are going to create a peer so the proof should never
381 // exist in the conflicting pool, but use belt and suspenders.
383
384 // New peer means new peerid!
385 const PeerId peerid = nextPeerId++;
386
387 // We have no peer for this proof, time to create it.
388 auto inserted = peers.emplace(peerid, proof, nextCooldownTimePoint);
389 assert(inserted.second);
390
391 if (localProof && proof->getId() == localProof->getId()) {
392 // Add it to the shareable proofs even if there is no node, we are the
393 // node. Otherwise it will be inserted after a node is attached to the
394 // proof.
395 shareableProofs.insert(proof);
396 }
397
398 // Add to our registered score when adding to the peer list
399 totalPeersScore += proof->getScore();
400
401 // If there are nodes waiting for this proof, add them
402 auto &pendingNodesView = pendingNodes.get<by_proofid>();
403 auto range = pendingNodesView.equal_range(proofid);
404
405 // We want to update the nodes then remove them from the pending set. That
406 // will invalidate the range iterators, so we need to save the node ids
407 // first before we can loop over them.
408 std::vector<NodeId> nodeids;
409 nodeids.reserve(std::distance(range.first, range.second));
410 std::transform(range.first, range.second, std::back_inserter(nodeids),
411 [](const PendingNode &n) { return n.nodeid; });
412
413 for (const NodeId &nodeid : nodeids) {
414 addOrUpdateNode(inserted.first, nodeid);
415 }
416
417 return true;
418}
419
421 if (isDangling(proofid) && mode == RejectionMode::INVALIDATE) {
423 return true;
424 }
425
426 if (!exists(proofid)) {
427 return false;
428 }
429
430 if (immatureProofPool.removeProof(proofid)) {
431 return true;
432 }
433
434 if (mode == RejectionMode::DEFAULT &&
436 // In default mode we keep the proof in the conflicting pool
437 return true;
438 }
439
440 if (mode == RejectionMode::INVALIDATE &&
442 // In invalidate mode we remove the proof completely
443 return true;
444 }
445
446 auto &pview = peers.get<by_proofid>();
447 auto it = pview.find(proofid);
448 assert(it != pview.end());
449
450 const ProofRef proof = it->proof;
451
452 if (!removePeer(it->peerid)) {
453 return false;
454 }
455
456 // If there was conflicting proofs, attempt to pull them back
457 for (const SignedStake &ss : proof->getStakes()) {
458 const ProofRef conflictingProof =
459 conflictingProofPool.getProof(ss.getStake().getUTXO());
460 if (!conflictingProof) {
461 continue;
462 }
463
464 conflictingProofPool.removeProof(conflictingProof->getId());
465 registerProof(conflictingProof);
466 }
467
468 if (mode == RejectionMode::DEFAULT) {
470 }
471
472 return true;
473}
474
476 std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
477 registeredProofs.clear();
478 const auto now = GetTime<std::chrono::seconds>();
479
480 std::vector<ProofRef> newlyDanglingProofs;
481 for (const Peer &peer : peers) {
482 // If the peer is not our local proof, has been registered for some
483 // time and has no node attached, discard it.
484 if ((!localProof || peer.getProofId() != localProof->getId()) &&
485 peer.node_count == 0 &&
486 (peer.registration_time + Peer::DANGLING_TIMEOUT) <= now) {
487 // Check the remotes status to determine if we should set the proof
488 // as dangling. This prevents from dropping a proof on our own due
489 // to a network issue. If the remote presence status is inconclusive
490 // we assume our own position (missing = false).
491 if (!getRemotePresenceStatus(peer.getProofId()).value_or(false)) {
492 newlyDanglingProofs.push_back(peer.proof);
493 }
494 }
495 }
496
497 // Similarly, check if we have dangling proofs that could be pulled back
498 // because the network says so.
499 std::vector<ProofRef> previouslyDanglingProofs;
500 danglingProofPool.forEachProof([&](const ProofRef &proof) {
501 if (getRemotePresenceStatus(proof->getId()).value_or(false)) {
502 previouslyDanglingProofs.push_back(proof);
503 }
504 });
505 for (const ProofRef &proof : previouslyDanglingProofs) {
506 danglingProofPool.removeProof(proof->getId());
507 if (registerProof(proof)) {
508 registeredProofs.insert(proof);
509 }
510 }
511
512 for (const ProofRef &proof : newlyDanglingProofs) {
513 rejectProof(proof->getId(), RejectionMode::INVALIDATE);
515 // If the proof is added, it means there is no better conflicting
516 // dangling proof and this is not a duplicated, so it's worth
517 // printing a message to the log.
519 "Proof dangling for too long (no connected node): %s\n",
520 proof->getId().GetHex());
521 }
522 }
523
524 // If we have dangling proof, this is a good indicator that we need to
525 // request more nodes from our peers.
526 needMoreNodes = !newlyDanglingProofs.empty();
527}
528
530 for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) {
531 const PeerId p = selectPeer();
532
533 // If we cannot find a peer, it may be due to the fact that it is
534 // unlikely due to high fragmentation, so compact and retry.
535 if (p == NO_PEER) {
536 compact();
537 continue;
538 }
539
540 // See if that peer has an available node.
541 auto &nview = nodes.get<next_request_time>();
542 auto it = nview.lower_bound(boost::make_tuple(p, SteadyMilliseconds()));
543 if (it != nview.end() && it->peerid == p &&
544 it->nextRequestTime <= Now<SteadyMilliseconds>()) {
545 return it->nodeid;
546 }
547 }
548
549 // We failed to find a node to query, flag this so we can request more
550 needMoreNodes = true;
551
552 return NO_NODE;
553}
554
555std::unordered_set<ProofRef, SaltedProofHasher> PeerManager::updatedBlockTip() {
556 std::vector<ProofId> invalidProofIds;
557 std::vector<ProofRef> newImmatures;
558
559 {
560 LOCK(cs_main);
561
562 for (const auto &p : peers) {
564 if (!p.proof->verify(stakeUtxoDustThreshold, chainman, state)) {
565 if (isImmatureState(state)) {
566 newImmatures.push_back(p.proof);
567 }
568 invalidProofIds.push_back(p.getProofId());
569
571 "Invalidating proof %s: verification failed (%s)\n",
572 p.proof->getId().GetHex(), state.ToString());
573 }
574 }
575
576 // Disable thread safety analysis here because it does not play nicely
577 // with the lambda
579 [&](const ProofRef &proof) NO_THREAD_SAFETY_ANALYSIS {
582 if (!proof->verify(stakeUtxoDustThreshold, chainman, state)) {
583 invalidProofIds.push_back(proof->getId());
584
585 LogPrint(
587 "Invalidating dangling proof %s: verification failed "
588 "(%s)\n",
589 proof->getId().GetHex(), state.ToString());
590 }
591 });
592 }
593
594 // Remove the invalid proofs before the immature rescan. This makes it
595 // possible to pull back proofs with utxos that conflicted with these
596 // invalid proofs.
597 for (const ProofId &invalidProofId : invalidProofIds) {
598 rejectProof(invalidProofId, RejectionMode::INVALIDATE);
599 }
600
601 auto registeredProofs = immatureProofPool.rescan(*this);
602
603 for (auto &p : newImmatures) {
605 }
606
607 return registeredProofs;
608}
609
611 ProofRef proof;
612
613 forPeer(proofid, [&](const Peer &p) {
614 proof = p.proof;
615 return true;
616 });
617
618 if (!proof) {
619 proof = conflictingProofPool.getProof(proofid);
620 }
621
622 if (!proof) {
623 proof = immatureProofPool.getProof(proofid);
624 }
625
626 return proof;
627}
628
629bool PeerManager::isBoundToPeer(const ProofId &proofid) const {
630 auto &pview = peers.get<by_proofid>();
631 return pview.find(proofid) != pview.end();
632}
633
634bool PeerManager::isImmature(const ProofId &proofid) const {
635 return immatureProofPool.getProof(proofid) != nullptr;
636}
637
638bool PeerManager::isInConflictingPool(const ProofId &proofid) const {
639 return conflictingProofPool.getProof(proofid) != nullptr;
640}
641
642bool PeerManager::isDangling(const ProofId &proofid) const {
643 return danglingProofPool.getProof(proofid) != nullptr;
644}
645
646void PeerManager::setInvalid(const ProofId &proofid) {
647 invalidProofs.insert(proofid);
648}
649
650bool PeerManager::isInvalid(const ProofId &proofid) const {
651 return invalidProofs.contains(proofid);
652}
653
656}
657
658bool PeerManager::saveRemoteProof(const ProofId &proofid, const NodeId nodeid,
659 const bool present) {
660 // Get how many proofs this node has announced
661 auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
662 auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
663
664 // Limit the number of proofs a single node can save:
665 // - At least MAX_REMOTE_PROOFS
666 // - Up to 2x as much as we have
667 // The MAX_REMOTE_PROOFS minimum is there to ensure we don't overlimit at
668 // startup when we don't have proofs yet.
669 while (size_t(std::distance(begin, end)) >=
670 std::max(MAX_REMOTE_PROOFS, 2 * peers.size())) {
671 // Remove the proof with the oldest update time
672 begin = remoteProofsByLastUpdate.erase(begin);
673 }
674
675 auto it = remoteProofs.find(boost::make_tuple(proofid, nodeid));
676 if (it != remoteProofs.end()) {
677 remoteProofs.erase(it);
678 }
679
680 return remoteProofs
681 .emplace(RemoteProof{proofid, nodeid, GetTime<std::chrono::seconds>(),
682 present})
683 .second;
684}
685
686std::vector<RemoteProof>
688 std::vector<RemoteProof> nodeRemoteProofs;
689
690 auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
691 auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
692
693 for (auto &it = begin; it != end; it++) {
694 nodeRemoteProofs.emplace_back(*it);
695 }
696
697 return nodeRemoteProofs;
698}
699
700bool PeerManager::isRemoteProof(const ProofId &proofid) const {
701 auto &view = remoteProofs.get<by_proofid>();
702 return view.count(proofid) > 0;
703}
704
705bool PeerManager::removePeer(const PeerId peerid) {
706 auto it = peers.find(peerid);
707 if (it == peers.end()) {
708 return false;
709 }
710
711 // Remove all nodes from this peer.
712 removeNodeFromPeer(it, it->node_count);
713
714 auto &nview = nodes.get<next_request_time>();
715
716 // Add the nodes to the pending set
717 auto range = nview.equal_range(peerid);
718 for (auto &nit = range.first; nit != range.second; ++nit) {
719 pendingNodes.emplace(it->getProofId(), nit->nodeid);
720 };
721
722 // Remove nodes associated with this peer, unless their timeout is still
723 // active. This ensure that we don't overquery them in case they are
724 // subsequently added to another peer.
725 nview.erase(
726 nview.lower_bound(boost::make_tuple(peerid, SteadyMilliseconds())),
727 nview.upper_bound(
728 boost::make_tuple(peerid, Now<SteadyMilliseconds>())));
729
730 // Release UTXOs attached to this proof.
731 validProofPool.removeProof(it->getProofId());
732
733 // If there were nodes attached, remove from the radix tree as well
734 auto removed = shareableProofs.remove(Uint256RadixKey(it->getProofId()));
735
736 m_unbroadcast_proofids.erase(it->getProofId());
737
738 // Remove the peer from the PeerSet and remove its score from the registered
739 // score total.
740 assert(totalPeersScore >= it->getScore());
741 totalPeersScore -= it->getScore();
742 peers.erase(it);
743 return true;
744}
745
747 if (slots.empty() || slotCount == 0) {
748 return NO_PEER;
749 }
750
751 const uint64_t max = slotCount;
752 for (int retry = 0; retry < SELECT_PEER_MAX_RETRY; retry++) {
753 size_t i = selectPeerImpl(slots, GetRand(max), max);
754 if (i != NO_PEER) {
755 return i;
756 }
757 }
758
759 return NO_PEER;
760}
761
763 // There is nothing to compact.
764 if (fragmentation == 0) {
765 return 0;
766 }
767
768 std::vector<Slot> newslots;
769 newslots.reserve(peers.size());
770
771 uint64_t prevStop = 0;
772 uint32_t i = 0;
773 for (auto it = peers.begin(); it != peers.end(); it++) {
774 if (it->node_count == 0) {
775 continue;
776 }
777
778 newslots.emplace_back(prevStop, it->getScore(), it->peerid);
779 prevStop = slots[i].getStop();
780 if (!peers.modify(it, [&](Peer &p) { p.index = i++; })) {
781 return 0;
782 }
783 }
784
785 slots = std::move(newslots);
786
787 const uint64_t saved = slotCount - prevStop;
788 slotCount = prevStop;
789 fragmentation = 0;
790
791 return saved;
792}
793
795 uint64_t prevStop = 0;
796 uint32_t scoreFromSlots = 0;
797 for (size_t i = 0; i < slots.size(); i++) {
798 const Slot &s = slots[i];
799
800 // Slots must be in correct order.
801 if (s.getStart() < prevStop) {
802 return false;
803 }
804
805 prevStop = s.getStop();
806
807 // If this is a dead slot, then nothing more needs to be checked.
808 if (s.getPeerId() == NO_PEER) {
809 continue;
810 }
811
812 // We have a live slot, verify index.
813 auto it = peers.find(s.getPeerId());
814 if (it == peers.end() || it->index != i) {
815 return false;
816 }
817
818 // Accumulate score across slots
819 scoreFromSlots += slots[i].getScore();
820 }
821
822 // Score across slots must be the same as our allocated score
823 if (scoreFromSlots != connectedPeersScore) {
824 return false;
825 }
826
827 uint32_t scoreFromAllPeers = 0;
828 uint32_t scoreFromPeersWithNodes = 0;
829
830 std::unordered_set<COutPoint, SaltedOutpointHasher> peersUtxos;
831 for (const auto &p : peers) {
832 // Accumulate the score across peers to compare with total known score
833 scoreFromAllPeers += p.getScore();
834
835 // A peer should have a proof attached
836 if (!p.proof) {
837 return false;
838 }
839
840 // Check proof pool consistency
841 for (const auto &ss : p.proof->getStakes()) {
842 const COutPoint &outpoint = ss.getStake().getUTXO();
843 auto proof = validProofPool.getProof(outpoint);
844
845 if (!proof) {
846 // Missing utxo
847 return false;
848 }
849 if (proof != p.proof) {
850 // Wrong proof
851 return false;
852 }
853
854 if (!peersUtxos.emplace(outpoint).second) {
855 // Duplicated utxo
856 return false;
857 }
858 }
859
860 // Count node attached to this peer.
861 const auto count_nodes = [&]() {
862 size_t count = 0;
863 auto &nview = nodes.get<next_request_time>();
864 auto begin = nview.lower_bound(
865 boost::make_tuple(p.peerid, SteadyMilliseconds()));
866 auto end = nview.upper_bound(
867 boost::make_tuple(p.peerid + 1, SteadyMilliseconds()));
868
869 for (auto it = begin; it != end; ++it) {
870 count++;
871 }
872
873 return count;
874 };
875
876 if (p.node_count != count_nodes()) {
877 return false;
878 }
879
880 // If there are no nodes attached to this peer, then we are done.
881 if (p.node_count == 0) {
882 continue;
883 }
884
885 scoreFromPeersWithNodes += p.getScore();
886 // The index must point to a slot refering to this peer.
887 if (p.index >= slots.size() || slots[p.index].getPeerId() != p.peerid) {
888 return false;
889 }
890
891 // If the score do not match, same thing.
892 if (slots[p.index].getScore() != p.getScore()) {
893 return false;
894 }
895
896 // Check the proof is in the radix tree only if there are nodes attached
897 if (((localProof && p.getProofId() == localProof->getId()) ||
898 p.node_count > 0) &&
899 shareableProofs.get(p.getProofId()) == nullptr) {
900 return false;
901 }
902 if (p.node_count == 0 &&
903 shareableProofs.get(p.getProofId()) != nullptr) {
904 return false;
905 }
906 }
907
908 // Check our accumulated scores against our registred and allocated scores
909 if (scoreFromAllPeers != totalPeersScore) {
910 return false;
911 }
912 if (scoreFromPeersWithNodes != connectedPeersScore) {
913 return false;
914 }
915
916 // We checked the utxo consistency for all our peers utxos already, so if
917 // the pool size differs from the expected one there are dangling utxos.
918 if (validProofPool.size() != peersUtxos.size()) {
919 return false;
920 }
921
922 // Check there is no dangling proof in the radix tree
924 return isBoundToPeer(pLeaf->getId());
925 });
926}
927
928PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
929 const uint64_t max) {
930 assert(slot <= max);
931
932 size_t begin = 0, end = slots.size();
933 uint64_t bottom = 0, top = max;
934
935 // Try to find the slot using dichotomic search.
936 while ((end - begin) > 8) {
937 // The slot we picked in not allocated.
938 if (slot < bottom || slot >= top) {
939 return NO_PEER;
940 }
941
942 // Guesstimate the position of the slot.
943 size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom));
944 assert(begin <= i && i < end);
945
946 // We have a match.
947 if (slots[i].contains(slot)) {
948 return slots[i].getPeerId();
949 }
950
951 // We undershooted.
952 if (slots[i].precedes(slot)) {
953 begin = i + 1;
954 if (begin >= end) {
955 return NO_PEER;
956 }
957
958 bottom = slots[begin].getStart();
959 continue;
960 }
961
962 // We overshooted.
963 if (slots[i].follows(slot)) {
964 end = i;
965 top = slots[end].getStart();
966 continue;
967 }
968
969 // We have an unalocated slot.
970 return NO_PEER;
971 }
972
973 // Enough of that nonsense, let fallback to linear search.
974 for (size_t i = begin; i < end; i++) {
975 // We have a match.
976 if (slots[i].contains(slot)) {
977 return slots[i].getPeerId();
978 }
979 }
980
981 // We failed to find a slot, retry.
982 return NO_PEER;
983}
984
986 // The proof should be bound to a peer
987 if (isBoundToPeer(proofid)) {
988 m_unbroadcast_proofids.insert(proofid);
989 }
990}
991
993 m_unbroadcast_proofids.erase(proofid);
994}
995
997 const CBlockIndex *pprev,
998 std::vector<std::pair<ProofId, CScript>> &winners) {
999 if (!pprev) {
1000 return false;
1001 }
1002
1003 // Don't select proofs that have not been known for long enough, i.e. at
1004 // least since twice the dangling proof cleanup timeout before the last
1005 // block time, so we're sure to not account for proofs more recent than the
1006 // previous block or lacking node connected.
1007 // The previous block time is capped to now for the unlikely event the
1008 // previous block time is in the future.
1009 auto registrationDelay = std::chrono::duration_cast<std::chrono::seconds>(
1011 auto maxRegistrationDelay =
1012 std::chrono::duration_cast<std::chrono::seconds>(
1014 auto minRegistrationDelay =
1015 std::chrono::duration_cast<std::chrono::seconds>(
1017
1018 const int64_t refTime = std::min(pprev->GetBlockTime(), GetTime());
1019
1020 const int64_t targetRegistrationTime = refTime - registrationDelay.count();
1021 const int64_t maxRegistrationTime = refTime - minRegistrationDelay.count();
1022 const int64_t minRegistrationTime = refTime - maxRegistrationDelay.count();
1023
1024 const BlockHash prevblockhash = pprev->GetBlockHash();
1025
1026 std::vector<ProofRef> selectedProofs;
1027 ProofRef firstCompliantProof = ProofRef();
1028 while (selectedProofs.size() < peers.size()) {
1029 double bestRewardRank = std::numeric_limits<double>::max();
1030 ProofRef selectedProof = ProofRef();
1031 int64_t selectedProofRegistrationTime{0};
1032 StakeContenderId bestRewardHash;
1033
1034 for (const Peer &peer : peers) {
1035 if (!peer.proof) {
1036 // Should never happen, continue
1037 continue;
1038 }
1039
1040 if (!peer.hasFinalized ||
1041 peer.registration_time.count() >= maxRegistrationTime) {
1042 continue;
1043 }
1044
1045 if (std::find_if(selectedProofs.begin(), selectedProofs.end(),
1046 [&peer](const ProofRef &proof) {
1047 return peer.getProofId() == proof->getId();
1048 }) != selectedProofs.end()) {
1049 continue;
1050 }
1051
1052 StakeContenderId proofRewardHash(prevblockhash, peer.getProofId());
1053 if (proofRewardHash == uint256::ZERO) {
1054 // This either the result of an incredibly unlikely lucky hash,
1055 // or a the hash is getting abused. In this case, skip the
1056 // proof.
1057 LogPrintf(
1058 "Staking reward hash has a suspicious value of zero for "
1059 "proof %s and blockhash %s, skipping\n",
1060 peer.getProofId().ToString(), prevblockhash.ToString());
1061 continue;
1062 }
1063
1064 double proofRewardRank =
1065 proofRewardHash.ComputeProofRewardRank(peer.getScore());
1066 // If selectedProof is nullptr, this means that bestRewardRank is
1067 // MAX_DOUBLE so the comparison will always select this proof as the
1068 // preferred one. As a consequence it is safe to use 0 as a proofid.
1070 proofRewardHash, proofRewardRank, peer.getProofId(),
1071 bestRewardHash, bestRewardRank,
1072 selectedProof ? selectedProof->getId()
1073 : ProofId(uint256::ZERO))) {
1074 bestRewardRank = proofRewardRank;
1075 selectedProof = peer.proof;
1076 selectedProofRegistrationTime = peer.registration_time.count();
1077 bestRewardHash = proofRewardHash;
1078 }
1079 }
1080
1081 if (!selectedProof) {
1082 // No winner
1083 break;
1084 }
1085
1086 if (!firstCompliantProof &&
1087 selectedProofRegistrationTime < targetRegistrationTime) {
1088 firstCompliantProof = selectedProof;
1089 }
1090
1091 selectedProofs.push_back(selectedProof);
1092
1093 if (selectedProofRegistrationTime < minRegistrationTime &&
1094 !isFlaky(selectedProof->getId())) {
1095 break;
1096 }
1097 }
1098
1099 winners.clear();
1100
1101 if (!firstCompliantProof) {
1102 return false;
1103 }
1104
1105 winners.reserve(selectedProofs.size());
1106
1107 // Find the winner
1108 for (const ProofRef &proof : selectedProofs) {
1109 if (proof->getId() == firstCompliantProof->getId()) {
1110 winners.push_back({proof->getId(), proof->getPayoutScript()});
1111 }
1112 }
1113 // Add the others (if any) after the winner
1114 for (const ProofRef &proof : selectedProofs) {
1115 if (proof->getId() != firstCompliantProof->getId()) {
1116 winners.push_back({proof->getId(), proof->getPayoutScript()});
1117 }
1118 }
1119
1120 return true;
1121}
1122
1123bool PeerManager::setFlaky(const ProofId &proofid) {
1124 return manualFlakyProofids.insert(proofid).second;
1125}
1126
1127bool PeerManager::unsetFlaky(const ProofId &proofid) {
1128 return manualFlakyProofids.erase(proofid) > 0;
1129}
1130
/**
 * Determine whether a proof should be considered flaky, i.e. unreliable for
 * staking reward purposes.
 *
 * A proof is flaky when it was manually flagged, when we have no node attached
 * to it, or when the peers (weighted by avalanche score) reporting it missing
 * account for more than 30% of the sampled score.
 *
 * @param proofid The proof to check.
 * @return true if the proof is considered flaky.
 */
bool PeerManager::isFlaky(const ProofId &proofid) const {
    // Our own proof is never considered flaky.
    if (localProof && proofid == localProof->getId()) {
        return false;
    }

    // Manually flagged via setFlaky().
    if (manualFlakyProofids.count(proofid) > 0) {
        return true;
    }

    // If we are missing connection to this proof, consider flaky
    if (forPeer(proofid,
                [](const Peer &peer) { return peer.node_count == 0; })) {
        return true;
    }

    auto &remoteProofsByNodeId = remoteProofs.get<by_nodeid>();
    auto &nview = nodes.get<next_request_time>();

    // Peer id -> set of proof ids that peer's nodes reported as absent.
    std::unordered_map<PeerId, std::unordered_set<ProofId, SaltedProofIdHasher>>
        missing_per_peer;

    // Construct a set of missing proof ids per peer
    double total_score{0};
    for (const Peer &peer : peers) {
        const PeerId peerid = peer.peerid;

        total_score += peer.getScore();

        // Walk every node attached to this peer, then every remote proof
        // record reported by that node. Note: the loops advance the
        // equal_range bounds themselves (bound by reference).
        auto nodes_range = nview.equal_range(peerid);
        for (auto &nit = nodes_range.first; nit != nodes_range.second; ++nit) {
            auto proofs_range = remoteProofsByNodeId.equal_range(nit->nodeid);
            for (auto &proofit = proofs_range.first;
                 proofit != proofs_range.second; ++proofit) {
                if (!proofit->present) {
                    missing_per_peer[peerid].insert(proofit->proofid);
                }
            }
        };
    }

    double missing_score{0};

    // Now compute a score for the missing proof
    for (const auto &[peerid, missingProofs] : missing_per_peer) {
        if (missingProofs.size() > 3) {
            // Ignore peers with too many missing proofs
            continue;
        }

        auto pit = peers.find(peerid);
        if (pit == peers.end()) {
            // Peer not found
            continue;
        }

        // This peer reports our proof of interest as missing: weight the
        // report by the peer's avalanche score.
        if (missingProofs.count(proofid) > 0) {
            missing_score += pit->getScore();
        }
    }

    // Flaky if more than 30% of the total peer score reports the proof
    // missing. NOTE(review): if total_score is 0 this is NaN and the
    // comparison yields false — presumably intended; confirm.
    return (missing_score / total_score) > 0.3;
}
1193
1194std::optional<bool>
1196 auto &remoteProofsView = remoteProofs.get<by_proofid>();
1197 auto [begin, end] = remoteProofsView.equal_range(proofid);
1198
1199 if (begin == end) {
1200 // No remote registered anything yet, we are on our own
1201 return std::nullopt;
1202 }
1203
1204 double total_score{0};
1205 double present_score{0};
1206 double missing_score{0};
1207
1208 for (auto it = begin; it != end; it++) {
1209 auto nit = nodes.find(it->nodeid);
1210 if (nit == nodes.end()) {
1211 // No such node
1212 continue;
1213 }
1214
1215 const PeerId peerid = nit->peerid;
1216
1217 auto pit = peers.find(peerid);
1218 if (pit == peers.end()) {
1219 // Peer not found
1220 continue;
1221 }
1222
1223 uint32_t node_count = pit->node_count;
1224 if (localProof && pit->getProofId() == localProof->getId()) {
1225 // If that's our local proof, account for ourself
1226 ++node_count;
1227 }
1228
1229 if (node_count == 0) {
1230 // should never happen
1231 continue;
1232 }
1233
1234 const double score = double(pit->getScore()) / node_count;
1235
1236 total_score += score;
1237 if (it->present) {
1238 present_score += score;
1239 } else {
1240 missing_score += score;
1241 }
1242 }
1243
1244 if (localProof) {
1245 auto &peersByProofid = peers.get<by_proofid>();
1246
1247 // Do we have a node connected for that proof ?
1248 bool present = false;
1249 auto pit = peersByProofid.find(proofid);
1250 if (pit != peersByProofid.end()) {
1251 present = pit->node_count > 0;
1252 }
1253
1254 pit = peersByProofid.find(localProof->getId());
1255 if (pit != peersByProofid.end()) {
1256 // Also divide by node_count, we can have several nodes even for our
1257 // local proof.
1258 const double score =
1259 double(pit->getScore()) / (1 + pit->node_count);
1260
1261 total_score += score;
1262 if (present) {
1263 present_score += score;
1264 } else {
1265 missing_score += score;
1266 }
1267 }
1268 }
1269
1270 if (present_score / total_score > 0.55) {
1271 return std::make_optional(true);
1272 }
1273
1274 if (missing_score / total_score > 0.55) {
1275 return std::make_optional(false);
1276 }
1277
1278 return std::nullopt;
1279}
1280
1281bool PeerManager::dumpPeersToFile(const fs::path &dumpPath) const {
1282 try {
1283 const fs::path dumpPathTmp = dumpPath + ".new";
1284 FILE *filestr = fsbridge::fopen(dumpPathTmp, "wb");
1285 if (!filestr) {
1286 return false;
1287 }
1288
1289 CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
1290 file << PEERS_DUMP_VERSION;
1291 file << uint64_t(peers.size());
1292 for (const Peer &peer : peers) {
1293 file << peer.proof;
1294 file << peer.hasFinalized;
1295 file << int64_t(peer.registration_time.count());
1296 file << int64_t(peer.nextPossibleConflictTime.count());
1297 }
1298
1299 if (!FileCommit(file.Get())) {
1300 throw std::runtime_error(strprintf("Failed to commit to file %s",
1301 PathToString(dumpPathTmp)));
1302 }
1303 file.fclose();
1304
1305 if (!RenameOver(dumpPathTmp, dumpPath)) {
1306 throw std::runtime_error(strprintf("Rename failed from %s to %s",
1307 PathToString(dumpPathTmp),
1308 PathToString(dumpPath)));
1309 }
1310 } catch (const std::exception &e) {
1311 LogPrint(BCLog::AVALANCHE, "Failed to dump the avalanche peers: %s.\n",
1312 e.what());
1313 return false;
1314 }
1315
1316 LogPrint(BCLog::AVALANCHE, "Successfully dumped %d peers to %s.\n",
1317 peers.size(), PathToString(dumpPath));
1318
1319 return true;
1320}
1321
1323 const fs::path &dumpPath,
1324 std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
1325 registeredProofs.clear();
1326
1327 FILE *filestr = fsbridge::fopen(dumpPath, "rb");
1328 CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
1329 if (file.IsNull()) {
1331 "Failed to open avalanche peers file from disk.\n");
1332 return false;
1333 }
1334
1335 try {
1336 uint64_t version;
1337 file >> version;
1338
1339 if (version != PEERS_DUMP_VERSION) {
1341 "Unsupported avalanche peers file version.\n");
1342 return false;
1343 }
1344
1345 uint64_t numPeers;
1346 file >> numPeers;
1347
1348 auto &peersByProofId = peers.get<by_proofid>();
1349
1350 for (uint64_t i = 0; i < numPeers; i++) {
1351 ProofRef proof;
1352 bool hasFinalized;
1353 int64_t registrationTime;
1354 int64_t nextPossibleConflictTime;
1355
1356 file >> proof;
1357 file >> hasFinalized;
1358 file >> registrationTime;
1359 file >> nextPossibleConflictTime;
1360
1361 if (registerProof(proof)) {
1362 auto it = peersByProofId.find(proof->getId());
1363 if (it == peersByProofId.end()) {
1364 // Should never happen
1365 continue;
1366 }
1367
1368 // We don't modify any key so we don't need to rehash.
1369 // If the modify fails, it means we don't get the full benefit
1370 // from the file but we still added our peer to the set. The
1371 // non-overridden fields will be set the normal way.
1372 peersByProofId.modify(it, [&](Peer &p) {
1373 p.hasFinalized = hasFinalized;
1375 std::chrono::seconds{registrationTime};
1377 std::chrono::seconds{nextPossibleConflictTime};
1378 });
1379
1380 registeredProofs.insert(proof);
1381 }
1382 }
1383 } catch (const std::exception &e) {
1385 "Failed to read the avalanche peers file data on disk: %s.\n",
1386 e.what());
1387 return false;
1388 }
1389
1390 return true;
1391}
1392
1393} // namespace avalanche
ArgsManager gArgs
Definition: args.cpp:38
static constexpr PeerId NO_PEER
Definition: node.h:16
uint32_t PeerId
Definition: node.h:15
static constexpr size_t AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN
Conflicting proofs cooldown time default value in seconds.
Definition: avalanche.h:28
int64_t GetIntArg(const std::string &strArg, int64_t nDefault) const
Return integer argument or default value.
Definition: args.cpp:526
bool IsNull() const
Return true if the wrapped FILE* is nullptr, false otherwise.
Definition: streams.h:570
FILE * Get() const
Get wrapped FILE* without transfer of ownership.
Definition: streams.h:567
int fclose()
Definition: streams.h:541
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition: blockindex.h:25
int64_t GetBlockTime() const
Definition: blockindex.h:180
BlockHash GetBlockHash() const
Definition: blockindex.h:146
void insert(Span< const uint8_t > vKey)
Definition: bloom.cpp:215
bool contains(Span< const uint8_t > vKey) const
Definition: bloom.cpp:249
bool Invalid(Result result, const std::string &reject_reason="", const std::string &debug_message="")
Definition: validation.h:101
Result GetResult() const
Definition: validation.h:122
std::string ToString() const
Definition: validation.h:125
bool selectStakingRewardWinner(const CBlockIndex *pprev, std::vector< std::pair< ProofId, CScript > > &winners)
Deterministically select a list of payout scripts based on the proof set and the previous block hash.
uint32_t connectedPeersScore
Definition: peermanager.h:239
std::vector< RemoteProof > getRemoteProofs(const NodeId nodeid) const
bool removeNode(NodeId nodeid)
bool setFinalized(PeerId peerid)
Latch on that this peer has a finalized proof.
bool dumpPeersToFile(const fs::path &dumpPath) const
RemoteProofSet remoteProofs
Remember which node sent which proof so we have an image of the proof set of our peers.
Definition: peermanager.h:281
bool isDangling(const ProofId &proofid) const
bool updateNextRequestTime(NodeId nodeid, SteadyMilliseconds timeout)
bool unsetFlaky(const ProofId &proofid)
std::optional< bool > getRemotePresenceStatus(const ProofId &proofid) const
Get the presence remote status of a proof.
bool addNodeToPeer(const PeerSet::iterator &it)
Definition: peermanager.cpp:88
bool exists(const ProofId &proofid) const
Return true if the (valid) proof exists, but only for non-dangling proofs.
Definition: peermanager.h:406
bool isRemoteProof(const ProofId &proofid) const
PendingNodeSet pendingNodes
Definition: peermanager.h:225
bool verify() const
Perform consistency check on internal data structures.
bool forPeer(const ProofId &proofid, Callable &&func) const
Definition: peermanager.h:414
bool latchAvaproofsSent(NodeId nodeid)
Flag that a node did send its compact proofs.
bool addNode(NodeId nodeid, const ProofId &proofid)
Node API.
Definition: peermanager.cpp:32
static constexpr int SELECT_PEER_MAX_RETRY
Definition: peermanager.h:227
ProofIdSet m_unbroadcast_proofids
Track proof ids to broadcast.
Definition: peermanager.h:233
bool loadPeersFromFile(const fs::path &dumpPath, std::unordered_set< ProofRef, SaltedProofHasher > &registeredProofs)
RejectionMode
Rejection mode.
Definition: peermanager.h:394
void addUnbroadcastProof(const ProofId &proofid)
Proof broadcast API.
std::unordered_set< ProofRef, SaltedProofHasher > updatedBlockTip()
Update the peer set when a new block is connected.
void removeUnbroadcastProof(const ProofId &proofid)
bool isBoundToPeer(const ProofId &proofid) const
ProofRadixTree shareableProofs
Definition: peermanager.h:191
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
CRollingBloomFilter invalidProofs
Filter for proofs that are consensus-invalid or were recently invalidated by avalanche (finalized rej...
Definition: peermanager.h:295
uint64_t compact()
Trigger maintenance of internal data structures.
std::vector< Slot > slots
Definition: peermanager.h:163
uint32_t totalPeersScore
Quorum management.
Definition: peermanager.h:238
ProofPool danglingProofPool
Definition: peermanager.h:188
void setInvalid(const ProofId &proofid)
bool isFlaky(const ProofId &proofid) const
ChainstateManager & chainman
Definition: peermanager.h:243
bool isInvalid(const ProofId &proofid) const
std::unordered_set< ProofId, SaltedProofIdHasher > manualFlakyProofids
Definition: peermanager.h:297
bool removePeer(const PeerId peerid)
Remove an existing peer.
bool isImmature(const ProofId &proofid) const
bool addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid)
Definition: peermanager.cpp:47
bool rejectProof(const ProofId &proofid, RejectionMode mode=RejectionMode::DEFAULT)
ProofPool immatureProofPool
Definition: peermanager.h:187
RegistrationMode
Registration mode.
Definition: peermanager.h:371
ProofPool conflictingProofPool
Definition: peermanager.h:186
static constexpr size_t MAX_REMOTE_PROOFS
Definition: peermanager.h:300
bool setFlaky(const ProofId &proofid)
std::atomic< bool > needMoreNodes
Flag indicating that we failed to select a node and need to expand our node set.
Definition: peermanager.h:211
PeerId selectPeer() const
Randomly select a peer to poll.
bool isInConflictingPool(const ProofId &proofid) const
static constexpr int SELECT_NODE_MAX_RETRY
Definition: peermanager.h:228
void cleanupDanglingProofs(std::unordered_set< ProofRef, SaltedProofHasher > &registeredProofs)
ProofRef getProof(const ProofId &proofid) const
bool registerProof(const ProofRef &proof, ProofRegistrationState &registrationState, RegistrationMode mode=RegistrationMode::DEFAULT)
bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count=1)
bool updateNextPossibleConflictTime(PeerId peerid, const std::chrono::seconds &nextTime)
Proof and Peer related API.
void moveToConflictingPool(const ProofContainer &proofs)
AddProofStatus addProofIfPreferred(const ProofRef &proof, ConflictingProofSet &conflictingProofs)
Attempt to add a proof to the pool.
Definition: proofpool.cpp:54
size_t size() const
Definition: proofpool.h:135
AddProofStatus addProofIfNoConflict(const ProofRef &proof, ConflictingProofSet &conflictingProofs)
Attempt to add a proof to the pool, and fail if there is a conflict on any UTXO.
Definition: proofpool.cpp:13
size_t countProofs() const
Definition: proofpool.cpp:129
bool removeProof(ProofId proofid)
Definition: proofpool.cpp:79
void forEachProof(Callable &&func) const
Definition: proofpool.h:118
ProofRef getProof(const ProofId &proofid) const
Definition: proofpool.cpp:112
std::set< ProofRef, ConflictingProofComparator > ConflictingProofSet
Definition: proofpool.h:88
ProofRef getLowestScoreProof() const
Definition: proofpool.cpp:123
std::unordered_set< ProofRef, SaltedProofHasher > rescan(PeerManager &peerManager)
Definition: proofpool.cpp:86
std::string ToString() const
Definition: uint256.h:80
Path class wrapper to block calls to the fs::path(std::string) implicit constructor and the fs::path:...
Definition: fs.h:30
static const uint256 ZERO
Definition: uint256.h:134
static constexpr int CLIENT_VERSION
bitcoind-res.rc includes this file, but it cannot cope with real c++ code.
Definition: clientversion.h:38
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition: cs_main.cpp:7
bool RenameOver(fs::path src, fs::path dest)
Definition: fs_helpers.cpp:272
bool FileCommit(FILE *file)
Ensure file contents are fully committed to disk, using a platform-specific feature analogous to fsyn...
Definition: fs_helpers.cpp:125
#define LogPrint(category,...)
Definition: logging.h:238
#define LogPrintf(...)
Definition: logging.h:227
@ AVALANCHE
Definition: logging.h:62
ProofRegistrationResult
Definition: peermanager.h:145
static constexpr uint32_t AVALANCHE_MAX_IMMATURE_PROOFS
Maximum number of immature proofs the peer manager will accept from the network.
Definition: peermanager.h:44
static bool isImmatureState(const ProofValidationState &state)
static constexpr uint64_t PEERS_DUMP_VERSION
Definition: peermanager.cpp:30
PeerId selectPeerImpl(const std::vector< Slot > &slots, const uint64_t slot, const uint64_t max)
Internal methods that are exposed for testing purposes.
RCUPtr< const Proof > ProofRef
Definition: proof.h:185
static std::string PathToString(const path &path)
Convert path object to byte string.
Definition: fs.h:142
FILE * fopen(const fs::path &p, const char *mode)
Definition: fs.cpp:30
static constexpr NodeId NO_NODE
Special NodeId that represent no node.
Definition: nodeid.h:15
int64_t NodeId
Definition: nodeid.h:10
T GetRand(T nMax=std::numeric_limits< T >::max()) noexcept
Generate a uniform random integer of type T in the range [0..nMax) nMax defaults to std::numeric_limi...
Definition: random.h:85
@ SER_DISK
Definition: serialize.h:153
A BlockHash is a unique identifier for a block.
Definition: blockhash.h:13
RCUPtr< T > remove(const KeyType &key)
Remove an element from the tree.
Definition: radix.h:181
RCUPtr< T > get(const KeyType &key)
Get the value corresponding to a key.
Definition: radix.h:118
bool forEachLeaf(Callable &&func) const
Definition: radix.h:144
bool insert(const RCUPtr< T > &value)
Insert a value into the tree.
Definition: radix.h:112
Facility for using an uint256 as a radix tree key.
SteadyMilliseconds nextRequestTime
Definition: node.h:23
bool avaproofsSent
Definition: node.h:24
std::chrono::seconds registration_time
Definition: peermanager.h:93
std::chrono::seconds nextPossibleConflictTime
Definition: peermanager.h:94
uint32_t node_count
Definition: peermanager.h:87
static constexpr auto DANGLING_TIMEOUT
Consider dropping the peer if no node is attached after this timeout expired.
Definition: peermanager.h:102
uint32_t index
Definition: peermanager.h:86
uint32_t getScore() const
Definition: peermanager.h:111
ProofRef proof
Definition: peermanager.h:89
uint64_t getStop() const
Definition: peermanager.h:73
uint64_t getStart() const
Definition: peermanager.h:72
PeerId getPeerId() const
Definition: peermanager.h:75
StakeContenderIds are unique for each block to ensure that the peer polling for their acceptance has ...
double ComputeProofRewardRank(uint32_t proofScore) const
To make sure the selection is properly weighted according to the proof score, we normalize the conten...
#define LOCK(cs)
Definition: sync.h:306
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:357
static int count
Definition: tests.c:31
#define NO_THREAD_SAFETY_ANALYSIS
Definition: threadsafety.h:58
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
Definition: time.cpp:109
std::chrono::time_point< std::chrono::steady_clock, std::chrono::milliseconds > SteadyMilliseconds
Definition: time.h:31
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1202
AssertLockHeld(pool.cs)
assert(!tx.IsCoinBase())