@@ -154,8 +154,8 @@ namespace {
     uint256 hash;
     CBlockIndex *pindex;  //! Optional.
     int64_t nTime;  //! Time of "getdata" request in microseconds.
-    int nValidatedQueuedBefore;  //! Number of blocks queued with validated headers (globally) at the time this one is requested.
     bool fValidatedHeaders;  //! Whether this block has validated headers at the time of request.
+    int64_t nTimeDisconnect;  //! The timeout for this block request (for disconnecting a slow peer)
 };
 map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;

@@ -216,6 +216,7 @@ struct CNodeState {
     int64_t nStallingSince;
     list<QueuedBlock> vBlocksInFlight;
     int nBlocksInFlight;
+    int nBlocksInFlightValidHeaders;
     //! Whether we consider this a preferred download peer.
     bool fPreferredDownload;

@@ -229,6 +230,7 @@ struct CNodeState {
         fSyncStarted = false;
         nStallingSince = 0;
         nBlocksInFlight = 0;
+        nBlocksInFlightValidHeaders = 0;
         fPreferredDownload = false;
     }
 };
@@ -260,6 +262,12 @@ void UpdatePreferredDownload(CNode* node, CNodeState* state)
     nPreferredDownload += state->fPreferredDownload;
 }

+// Returns time at which to timeout block request (nTime in microseconds)
+int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore)
+{
+    return nTime + 500000 * Params().GetConsensus().nPowTargetSpacing * (4 + nValidatedQueuedBefore);
+}
+
 void InitializeNode(NodeId nodeid, const CNode *pnode) {
     LOCK(cs_main);
     CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
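As a worked example of the schedule this formula produces (a standalone sketch, assuming mainnet's nPowTargetSpacing of 600 seconds; values are in microseconds, like nTime):

    // Hypothetical illustration, not part of the patch: the timeout window
    // under mainnet parameters (nPowTargetSpacing = 600 seconds).
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const int64_t nPowTargetSpacing = 600;
        for (int nValidatedQueuedBefore = 0; nValidatedQueuedBefore <= 2; nValidatedQueuedBefore++) {
            // 500000 us * 600 = 5 minutes per unit, so the window is
            // 20 minutes plus 5 minutes per validated block already queued.
            int64_t window = 500000 * nPowTargetSpacing * (4 + nValidatedQueuedBefore);
            printf("queued=%d -> window=%lld min\n", nValidatedQueuedBefore,
                   (long long)(window / (60 * 1000000LL)));
        }
        return 0;
    }

With nothing validated in flight the window is 20 minutes; each validated block already queued adds 5 minutes, matching the "(2 + 0.5 * N) times the block interval" behavior referenced in the SendMessages comments below.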
@@ -292,6 +300,7 @@ void MarkBlockAsReceived(const uint256& hash) {
     if (itInFlight != mapBlocksInFlight.end()) {
         CNodeState *state = State(itInFlight->second.first);
         nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders;
+        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
         state->vBlocksInFlight.erase(itInFlight->second.second);
         state->nBlocksInFlight--;
         state->nStallingSince = 0;
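The per-node counter maintained here (and in MarkBlockAsInFlight below) mirrors the global nQueuedValidatedHeaders, so that nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, used in the SendMessages hunk further down, counts validated blocks in flight from all other peers. A toy check of that bookkeeping, using hypothetical local names:

    #include <cassert>

    int main()
    {
        int nGlobal = 0; // stands in for nQueuedValidatedHeaders
        int nPeerA = 0;  // stands in for state->nBlocksInFlightValidHeaders of peer A
        int nPeerB = 0;
        // Peer A requests two validated-header blocks, peer B one:
        nGlobal += 2; nPeerA += 2;
        nGlobal += 1; nPeerB += 1;
        // From A's point of view, one validated block is in flight elsewhere.
        assert(nGlobal - nPeerA == 1);
        // A block from A arrives: both counters are decremented together.
        nGlobal -= 1; nPeerA -= 1;
        assert(nGlobal - nPeerA == 1);
        return 0;
    }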
@@ -307,10 +316,12 @@ void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, CBlockIndex *pindex
     // Make sure it's not listed somewhere already.
     MarkBlockAsReceived(hash);

-    QueuedBlock newentry = {hash, pindex, GetTimeMicros(), nQueuedValidatedHeaders, pindex != NULL};
+    int64_t nNow = GetTimeMicros();
+    QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders)};
     nQueuedValidatedHeaders += newentry.fValidatedHeaders;
     list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
     state->nBlocksInFlight++;
+    state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
     mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
 }
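Note that the aggregate initializer for newentry is positional: the arguments now follow the revised field order of QueuedBlock (hash, pindex, nTime, fValidatedHeaders, nTimeDisconnect), and the disconnect deadline is computed once at request time instead of being re-derived later from a stored queue depth. A minimal sketch of that idiom, with hypothetical stand-in types:

    #include <cstdint>

    struct Entry {               // hypothetical stand-in for QueuedBlock
        int64_t nTime;           // time of request, microseconds
        bool fValidatedHeaders;
        int64_t nTimeDisconnect; // deadline, fixed when the request is made
    };

    int64_t Deadline(int64_t nNow, int nQueued)
    {
        return nNow + 500000LL * 600 * (4 + nQueued); // mainnet spacing assumed
    }

    int main()
    {
        int64_t nNow = 0;
        // Positional aggregate init: order must match the declaration order above.
        Entry e = {nNow, true, Deadline(nNow, 3)};
        return e.nTimeDisconnect == 35LL * 60 * 1000000 ? 0 : 1; // 35 minutes
    }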
@@ -5015,9 +5026,22 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
         // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
         // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
         // to unreasonably increase our timeout.
-        if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0 && state.vBlocksInFlight.front().nTime < nNow - 500000 * consensusParams.nPowTargetSpacing * (4 + state.vBlocksInFlight.front().nValidatedQueuedBefore)) {
-            LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", state.vBlocksInFlight.front().hash.ToString(), pto->id);
-            pto->fDisconnect = true;
+        // We also compare the block download timeout originally calculated against the time at which we'd disconnect
+        // if we assumed the block were being requested now (ignoring blocks we've requested from this peer, since we're
+        // only looking at this peer's oldest request). This way a large queue in the past doesn't result in a
+        // permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing
+        // more quickly than once every 5 minutes, then we'll shorten the download window for this block).
+        if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
+            QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
+            int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders);
+            if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) {
+                LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow);
+                queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow;
+            }
+            if (queuedBlock.nTimeDisconnect < nNow) {
+                LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
+                pto->fDisconnect = true;
+            }
         }

         //
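The clamp above makes each request's deadline monotonically non-increasing: it tightens as the global validated-header queue drains, and later growth of the queue never loosens it again. A small sketch of that invariant, with a hypothetical Reduce() helper standing in for the logic in SendMessages:

    #include <algorithm>
    #include <cstdint>

    // The stored deadline may only move toward "timeout if requested now".
    int64_t Reduce(int64_t nTimeDisconnect, int64_t nTimeoutIfRequestedNow)
    {
        return std::min(nTimeDisconnect, nTimeoutIfRequestedNow);
    }

    int main()
    {
        int64_t deadline = 100;          // computed when the block was requested
        deadline = Reduce(deadline, 80); // queue drained: deadline tightens to 80
        deadline = Reduce(deadline, 90); // queue grew again: deadline stays at 80
        return deadline == 80 ? 0 : 1;
    }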