
Track tip update time and last new block announcement from each peer

0.16
Suhas Daftuar, 7 years ago
parent commit db32a65897
1 changed file: src/net_processing.cpp (37 lines changed)

@@ -127,6 +127,10 @@ namespace {
     /** Number of outbound peers with m_chain_sync.m_protect. */
     int g_outbound_peers_with_protect_from_disconnect = 0;
+    /** When our tip was last updated. */
+    int64_t g_last_tip_update = 0;
     /** Relay map, protected by cs_main. */
     typedef std::map<uint256, CTransactionRef> MapRelay;
     MapRelay mapRelay;
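
For context: g_last_tip_update records, in cs_main-protected state, the last time our own tip advanced (it is set in BlockConnected further down in this diff). Below is a minimal sketch of the kind of stale-tip predicate such a timestamp could feed; the helper name and the three-block-interval threshold are illustrative assumptions, not code from this commit:

    // Sketch only (assumed helper, not in this diff): is our tip suspiciously old?
    static bool TipMayBeStale(const Consensus::Params &consensusParams)
    {
        AssertLockHeld(cs_main);
        if (g_last_tip_update == 0) {
            // Treat startup as "just updated" so a fresh node is not flagged immediately.
            g_last_tip_update = GetTime();
        }
        // Stale if no new block has connected for several expected block intervals.
        return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3;
    }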
@@ -231,6 +235,9 @@ struct CNodeState {
     ChainSyncTimeoutState m_chain_sync;
+    //! Time of last new block announcement
+    int64_t m_last_block_announcement;
     CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
         fCurrentlyConnected = false;
         nMisbehavior = 0;
@@ -254,6 +261,7 @@ struct CNodeState {
         fWantsCmpctWitness = false;
         fSupportsDesiredCmpctVersion = false;
         m_chain_sync = { 0, nullptr, false, false };
+        m_last_block_announcement = 0;
     }
 };
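
m_last_block_announcement gives each CNodeState a timestamp of the last time that peer announced a previously-unseen header with more work than our tip. A hedged sketch of how outbound-peer selection logic could rank peers by this field, assuming it runs inside net_processing.cpp with access to connman; the loop, variable names and lack of tie-breaking are assumptions for illustration only, not code from this commit:

    // Sketch only: find the outbound peer that least recently announced a new block.
    NodeId worst_peer = -1;
    int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
    LOCK(cs_main);
    connman->ForEachNode([&](CNode* pnode) {
        if (pnode->fInbound) return;              // only outbound peers are candidates
        CNodeState *state = State(pnode->GetId());
        if (state == nullptr) return;             // peer not yet registered
        if (state->m_last_block_announcement < oldest_block_announcement) {
            oldest_block_announcement = state->m_last_block_announcement;
            worst_peer = pnode->GetId();
        }
    });
    // worst_peer (if not -1) would be the candidate to drop in favour of a fresh connection.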
@@ -797,6 +805,8 @@ void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pb
         }
         LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
     }
+    g_last_tip_update = GetTime();
 }
 // All of the following cache a recent block, and are protected by cs_most_recent_block
@@ -1215,6 +1225,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
         return true;
     }
+    bool received_new_header = false;
     const CBlockIndex *pindexLast = nullptr;
     {
         LOCK(cs_main);
@@ -1255,6 +1266,12 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
             }
             hashLastBlock = header.GetHash();
         }
+        // If we don't have the last header, then they'll have given us
+        // something new (if these headers are valid).
+        if (mapBlockIndex.find(hashLastBlock) == mapBlockIndex.end()) {
+            received_new_header = true;
+        }
     }
     CValidationState state;
@@ -1319,6 +1336,10 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
         // because it is set in UpdateBlockAvailability. Some nullptr checks
         // are still present, however, as belt-and-suspenders.
+        if (received_new_header && pindexLast->nChainWork > chainActive.Tip()->nChainWork) {
+            nodestate->m_last_block_announcement = GetTime();
+        }
         if (nCount == MAX_HEADERS_RESULTS) {
             // Headers message had its maximum size; the peer may have more headers.
             // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
@@ -2219,6 +2240,8 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         CBlockHeaderAndShortTxIDs cmpctblock;
         vRecv >> cmpctblock;
+        bool received_new_header = false;
         {
             LOCK(cs_main);
@@ -2228,6 +2251,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                     connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
                 return true;
             }
+            if (mapBlockIndex.find(cmpctblock.header.GetHash()) == mapBlockIndex.end()) {
+                received_new_header = true;
+            }
         }
         const CBlockIndex *pindex = nullptr;
@@ -2266,6 +2293,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         assert(pindex);
         UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
+        CNodeState *nodestate = State(pfrom->GetId());
+        // If this was a new header with more work than our tip, update the
+        // peer's last block announcement time
+        if (received_new_header && pindex->nChainWork > chainActive.Tip()->nChainWork) {
+            nodestate->m_last_block_announcement = GetTime();
+        }
         std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
         bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
@@ -2288,8 +2323,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
             return true;
-        CNodeState *nodestate = State(pfrom->GetId());
         if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
             // Don't bother trying to process compact blocks from v1 peers
             // after segwit activates.
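
Taken together, the global tip timestamp and the per-peer announcement times are the raw data for deciding when to look for better-connected peers. A speculative sketch of a periodic consumer of this state, assuming the TipMayBeStale helper sketched earlier and an added m_stale_tip_check_time member; the function name, scheduler wiring and 10-minute cadence are assumptions, not part of this commit:

    // Sketch only: a periodic check that could consume the state added above.
    void PeerLogicValidation::CheckForStaleTip(const Consensus::Params &consensusParams)
    {
        LOCK(cs_main);
        if (GetTime() <= m_stale_tip_check_time) return;  // m_stale_tip_check_time: assumed member
        if (!fImporting && !fReindex && connman->GetNetworkActive() && TipMayBeStale(consensusParams)) {
            // Tip looks stale: ask the connection manager for one extra outbound
            // peer in the hope of hearing about new blocks sooner.
            LogPrintf("Potential stale tip detected, will try using extra outbound peer\n");
            connman->SetTryNewOutboundPeer(true);
        }
        m_stale_tip_check_time = GetTime() + 10 * 60;
    }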
