@@ -470,7 +470,7 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
     }
 }
 
-void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom) {
+void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) {
     if (nLocalServices & NODE_WITNESS) {
         // Don't ever request compact blocks when segwit is enabled.
         return;
@@ -484,12 +484,13 @@ void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pf
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
             // As per BIP152, we only get 3 of our peers to announce
             // blocks using compact encodings.
-            CNode* pnodeStop = FindNode(lNodesAnnouncingHeaderAndIDs.front());
-            if (pnodeStop) {
+            bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
                 pnodeStop->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
-                lNodesAnnouncingHeaderAndIDs.pop_front();
-            }
+                return true;
+            });
+            if(found)
+                lNodesAnnouncingHeaderAndIDs.pop_front();
         }
         fAnnounceUsingCMPCTBLOCK = true;
         pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
@@ -3089,15 +3090,15 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams,
             int nBlockEstimate = 0;
             if (fCheckpointsEnabled)
                 nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints());
-            {
-                LOCK(cs_vNodes);
-                BOOST_FOREACH(CNode* pnode, vNodes) {
+            if(connman) {
+                connman->ForEachNode([nNewHeight, nBlockEstimate, &vHashes](CNode* pnode) {
                     if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate)) {
                         BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
                             pnode->PushBlockHash(hash);
                         }
                     }
-                }
+                    return true;
+                });
             }
             // Notify external listeners about the new tip.
             if (!vHashes.empty()) {
@@ -4726,6 +4727,45 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
     return true;
 }
 
+static void RelayTransaction(const CTransaction& tx, CConnman& connman)
+{
+    CInv inv(MSG_TX, tx.GetHash());
+    connman.ForEachNode([&inv](CNode* pnode)
+    {
+        pnode->PushInventory(inv);
+        return true;
+    });
+}
+
+static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman)
+{
+    int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
+
+    // Relay to a limited number of other nodes
+    // Use deterministic randomness to send to the same nodes for 24 hours
+    // at a time so the addrKnowns of the chosen nodes prevent repeats
+    static const uint64_t salt0 = GetRand(std::numeric_limits<uint64_t>::max());
+    static const uint64_t salt1 = GetRand(std::numeric_limits<uint64_t>::max());
+    uint64_t hashAddr = addr.GetHash();
+    std::multimap<uint64_t, CNode*> mapMix;
+    const CSipHasher hasher = CSipHasher(salt0, salt1).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
+
+    auto sortfunc = [&mapMix, &hasher](CNode* pnode) {
+        if (pnode->nVersion >= CADDR_TIME_VERSION) {
+            uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
+            mapMix.emplace(hashKey, pnode);
+        }
+        return true;
+    };
+
+    auto pushfunc = [&addr, &mapMix, &nRelayNodes] {
+        for (auto mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
+            mi->second->PushAddress(addr);
+    };
+
+    connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
+}
+
 void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams)
 {
     std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
@@ -5135,26 +5175,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
             {
                 // Relay to a limited number of other nodes
-                {
-                    LOCK(cs_vNodes);
-                    // Use deterministic randomness to send to the same nodes for 24 hours
-                    // at a time so the addrKnowns of the chosen nodes prevent repeats
-                    static const uint64_t salt0 = GetRand(std::numeric_limits<uint64_t>::max());
-                    static const uint64_t salt1 = GetRand(std::numeric_limits<uint64_t>::max());
-                    uint64_t hashAddr = addr.GetHash();
-                    multimap<uint64_t, CNode*> mapMix;
-                    const CSipHasher hasher = CSipHasher(salt0, salt1).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
-                    BOOST_FOREACH(CNode* pnode, vNodes)
-                    {
-                        if (pnode->nVersion < CADDR_TIME_VERSION)
-                            continue;
-                        uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
-                        mapMix.insert(make_pair(hashKey, pnode));
-                    }
-                    int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
-                    for (multimap<uint64_t, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
-                        ((*mi).second)->PushAddress(addr);
-                }
+                RelayAddress(addr, fReachable, connman);
             }
             // Do not store addresses outside our network
             if (fReachable)
@@ -5448,7 +5469,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
 
         if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) {
             mempool.check(pcoinsTip);
-            RelayTransaction(tx);
+            RelayTransaction(tx, connman);
             for (unsigned int i = 0; i < tx.vout.size(); i++) {
                 vWorkQueue.emplace_back(inv.hash, i);
             }
@@ -5485,7 +5506,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                         continue;
                     if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) {
                         LogPrint("mempool", "   accepted orphan tx %s\n", orphanHash.ToString());
-                        RelayTransaction(orphanTx);
+                        RelayTransaction(orphanTx, connman);
                         for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
                             vWorkQueue.emplace_back(orphanHash, i);
                         }
@@ -5560,7 +5581,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             int nDoS = 0;
             if (!state.IsInvalid(nDoS) || nDoS == 0) {
                 LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
-                RelayTransaction(tx);
+                RelayTransaction(tx, connman);
             } else {
                 LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));
             }
@@ -5886,7 +5907,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             if (nodestate->fProvidesHeaderAndIDs && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN) && !(nLocalServices & NODE_WITNESS)) {
                 // We seem to be rather well-synced, so it appears pfrom was the first to provide us
                 // with this block! Let's get them to announce using compact blocks in the future.
-                MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom);
+                MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
                 // In any case, we want to download using a compact block, not a regular one
                 vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
             }