@@ -3577,6 +3577,7 @@ void static ProcessGetData(CNode* pfrom)
 bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived)
 {
+    const CChainParams& chainparams = Params();
     RandAddSeedPerfmon();
     LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
     if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
@@ -3836,7 +3837,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                     // not a direct successor.
                     pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash);
                     CNodeState *nodestate = State(pfrom->GetId());
-                    if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - Params().TargetSpacing() * 20 &&
+                    if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - chainparams.GetConsensus().nPowTargetSpacing * 20 &&
                         nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                         vToFetch.push_back(inv);
                         // Mark block as in flight already, even though the actual "getdata" message only goes out
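
For reference, the condition this hunk re-parametrizes is a recency window of 20 block intervals: the announced block is only fetched directly when the tip is within 20 * nPowTargetSpacing of the network-adjusted time. A minimal standalone sketch of the same check (hypothetical helper name, not part of this diff; mainnet spacing of 600 s assumed for the numbers in the comment):

// Minimal sketch, not part of the patch: the recency check rewired above.
#include <cstdint>

bool CloseToSynced(int64_t tipTime, int64_t adjustedTime, int64_t nPowTargetSpacing)
{
    // Fetch the announced block directly only if our tip is within 20 block
    // intervals of the current (network-adjusted) time: 20 * 600 s, roughly
    // 3.3 hours on mainnet. Otherwise the headers-first path handles it.
    return tipTime > adjustedTime - nPowTargetSpacing * 20;
}
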
@@ -4499,6 +4500,7 @@ bool ProcessMessages(CNode* pfrom)
 bool SendMessages(CNode* pto, bool fSendTrickle)
 {
+    const Consensus::Params& consensusParams = Params().GetConsensus();
     {
         // Don't send anything until we get their version message
         if (pto->nVersion == 0)
@@ -4686,7 +4688,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
         // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
         // being saturated. We only count validated in-flight blocks so peers can't advertize nonexisting block hashes
         // to unreasonably increase our timeout.
-        if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0 && state.vBlocksInFlight.front().nTime < nNow - 500000 * Params().TargetSpacing() * (4 + state.vBlocksInFlight.front().nValidatedQueuedBefore)) {
+        if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0 && state.vBlocksInFlight.front().nTime < nNow - 500000 * consensusParams.nPowTargetSpacing * (4 + state.vBlocksInFlight.front().nValidatedQueuedBefore)) {
             LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", state.vBlocksInFlight.front().hash.ToString(), pto->id);
             pto->fDisconnect = true;
         }
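
For reference, the timeout this hunk re-parametrizes works out to (2 + 0.5 * N) block intervals, since nNow and the queued block's nTime are microsecond timestamps in SendMessages and 500000 us * spacing is half an interval. A minimal standalone sketch of that arithmetic (hypothetical helper name, not part of this diff):

// Minimal sketch, not part of the patch: the disconnect-timeout test above.
// Assumes requestTime/nNow are in microseconds, as with GetTimeMicros().
#include <cstdint>

bool BlockDownloadTimedOut(int64_t requestTime, int64_t nNow,
                           int64_t nPowTargetSpacing, int validatedQueuedBefore)
{
    // 500000 us * spacing is half a block interval, so the allowance is
    // (2 + 0.5 * N) intervals: with a 600 s spacing that is 20 minutes base
    // plus 5 minutes per validated block queued ahead of this one.
    int64_t allowance = 500000 * nPowTargetSpacing * (4 + validatedQueuedBefore);
    return requestTime < nNow - allowance;
}
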