@@ -51,12 +51,13 @@ struct COrphanTx {
    NodeId fromPeer;
    int64_t nTimeExpire;
};
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
static CCriticalSection g_cs_orphans;
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
void EraseOrphansFor(NodeId peer);
static size_t vExtraTxnForCompactIt = 0;
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(cs_main);
static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
@@ -127,7 +128,7 @@ namespace {
    int g_outbound_peers_with_protect_from_disconnect = 0;
    /** When our tip was last updated. */
    int64_t g_last_tip_update = 0;
    std::atomic<int64_t> g_last_tip_update(0);
    /** Relay map, protected by cs_main. */
    typedef std::map<uint256, CTransactionRef> MapRelay;
@@ -631,7 +632,7 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
// mapOrphanTransactions
//
void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
    if (max_extra_txn <= 0)
@@ -642,7 +643,7 @@ void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_RE
    vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}
bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    const uint256& hash = tx->GetHash();
    if (mapOrphanTransactions.count(hash))
@@ -675,7 +676,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE
    return true;
}
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
@@ -695,6 +696,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
void EraseOrphansFor(NodeId peer)
{
    LOCK(g_cs_orphans);
    int nErased = 0;
    std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
@@ -709,8 +711,10 @@ void EraseOrphansFor(NodeId peer)
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
    LOCK(g_cs_orphans);
    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
@@ -804,7 +808,7 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &schedu
}
void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
    LOCK(cs_main);
    LOCK(g_cs_orphans);
    std::vector<uint256> vOrphanErase;
@@ -971,9 +975,13 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
            recentRejects->reset();
        }
        {
            LOCK(g_cs_orphans);
            if (mapOrphanTransactions.count(inv.hash)) return true;
        }
        return recentRejects->contains(inv.hash) ||
               mempool.exists(inv.hash) ||
               mapOrphanTransactions.count(inv.hash) ||
               pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
               pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
    }
@@ -1030,180 +1038,198 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connma
    connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
void static ProcessGetBlockData(CNode* pfrom, const Consensus::Params& consensusParams, const CInv& inv, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
    std::vector<CInv> vNotFound;
    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
    LOCK(cs_main);
    while (it != pfrom->vRecvGetData.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->fPauseSend)
            break;
    bool send = false;
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    bool fWitnessesPresentInARecentCompactBlock;
    {
        LOCK(cs_most_recent_block);
        a_recent_block = most_recent_block;
        a_recent_compact_block = most_recent_compact_block;
        fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
    }
        const CInv &inv = *it;
    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
        if (mi != mapBlockIndex.end())
        {
            if (interruptMsgProc)
                return;
            if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
                    mi->second->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        CValidationState dummy;
        ActivateBestChain(dummy, Params(), a_recent_block);
    }
        it++;
    LOCK(cs_main);
    BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
    if (mi != mapBlockIndex.end()) {
        send = BlockRequestAllowed(mi->second, consensusParams);
        if (!send) {
            LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
        }
    }
    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
    // disconnect node in case we have reached the outbound limit for serving historical blocks
    // never disconnect whitelisted nodes
    if (send && connman->OutboundTargetReached(true) && (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
    {
        LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
        //disconnect node
        pfrom->fDisconnect = true;
        send = false;
    }
    // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
    if (send && !pfrom->fWhitelisted && (
            (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
       )) {
        LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
        if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
        //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
        pfrom->fDisconnect = true;
        send = false;
    }
    // Pruned nodes may have deleted the block, so check whether
    // it's available before trying to send.
    if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
    {
        std::shared_ptr<const CBlock> pblock;
        if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
            pblock = a_recent_block;
        } else {
            // Send block from disk
            std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
            if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
                assert(!"cannot load block from disk");
            pblock = pblockRead;
        }
        if (inv.type == MSG_BLOCK)
            connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
        else if (inv.type == MSG_WITNESS_BLOCK)
            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
        else if (inv.type == MSG_FILTERED_BLOCK)
        {
            bool sendMerkleBlock = false;
            CMerkleBlock merkleBlock;
            {
            bool send = false;
            BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
            std::shared_ptr<const CBlock> a_recent_block;
            std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
            bool fWitnessesPresentInARecentCompactBlock;
            {
                LOCK(cs_most_recent_block);
                a_recent_block = most_recent_block;
                a_recent_compact_block = most_recent_compact_block;
                fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
                LOCK(pfrom->cs_filter);
                if (pfrom->pfilter) {
                    sendMerkleBlock = true;
                    merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
                }
            if (mi != mapBlockIndex.end())
            {
                if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
                        mi->second->IsValid(BLOCK_VALID_TREE)) {
                    // If we have the block and all of its parents, but have not yet validated it,
                    // we might be in the middle of connecting it (ie in the unlock of cs_main
                    // before ActivateBestChain but after AcceptBlock).
                    // In this case, we need to run ActivateBestChain prior to checking the relay
                    // conditions below.
                    CValidationState dummy;
                    ActivateBestChain(dummy, Params(), a_recent_block);
                }
                send = BlockRequestAllowed(mi->second, consensusParams);
                if (!send) {
                    LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
                }
            }
            if (sendMerkleBlock) {
                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                // This avoids hurting performance by pointlessly requiring a round-trip
                // Note that there is currently no way for a node to request any single transactions we didn't send here -
                // they must either disconnect and retry or request the full block.
                // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                // however we MUST always provide at least what the remote peer needs
                typedef std::pair<unsigned int, uint256> PairType;
                for (PairType& pair : merkleBlock.vMatchedTxn)
                    connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
            }
            // else
                // no response
        }
        else if (inv.type == MSG_CMPCT_BLOCK)
        {
            // If a peer is asking for old blocks, we're almost guaranteed
            // they won't have a useful mempool to match against a compact block,
            // and we don't feel like constructing the object for them, so
            // instead we respond with the full, non-compact block.
            bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
            int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
            if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
                if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
                    connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
                } else {
                    CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
                    connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                }
            // disconnect node in case we have reached the outbound limit for serving historical blocks
            // never disconnect whitelisted nodes
            if (send && connman->OutboundTargetReached(true) && (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
            {
                LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
            } else {
                connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
            }
        }
                //disconnect node
                pfrom->fDisconnect = true;
                send = false;
            }
            // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
            if (send && !pfrom->fWhitelisted && (
                    (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
               )) {
                LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == pfrom->hashContinue)
        {
            // Bypass PushInventory, this must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
            pfrom->hashContinue.SetNull();
        }
    }
}
                //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
                pfrom->fDisconnect = true;
                send = false;
            }
            // Pruned nodes may have deleted the block, so check whether
            // it's available before trying to send.
            if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
            {
                std::shared_ptr<const CBlock> pblock;
                if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
                    pblock = a_recent_block;
                } else {
                    // Send block from disk
                    std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
                    if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
                        assert(!"cannot load block from disk");
                    pblock = pblockRead;
                }
                if (inv.type == MSG_BLOCK)
                    connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
                else if (inv.type == MSG_WITNESS_BLOCK)
                    connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
                else if (inv.type == MSG_FILTERED_BLOCK)
                {
                    bool sendMerkleBlock = false;
                    CMerkleBlock merkleBlock;
                    {
                        LOCK(pfrom->cs_filter);
                        if (pfrom->pfilter) {
                            sendMerkleBlock = true;
                            merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
                        }
                    }
                    if (sendMerkleBlock) {
                        connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                        // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                        // This avoids hurting performance by pointlessly requiring a round-trip
                        // Note that there is currently no way for a node to request any single transactions we didn't send here -
                        // they must either disconnect and retry or request the full block.
                        // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                        // however we MUST always provide at least what the remote peer needs
                        typedef std::pair<unsigned int, uint256> PairType;
                        for (PairType& pair : merkleBlock.vMatchedTxn)
                            connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
                    }
                    // else
                        // no response
                }
                else if (inv.type == MSG_CMPCT_BLOCK)
                {
                    // If a peer is asking for old blocks, we're almost guaranteed
                    // they won't have a useful mempool to match against a compact block,
                    // and we don't feel like constructing the object for them, so
                    // instead we respond with the full, non-compact block.
                    bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
                    int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
                    if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
                        if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
                            connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
                        } else {
                            CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
                            connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                        }
                    } else {
                        connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
                    }
                }
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
    AssertLockNotHeld(cs_main);
                // Trigger the peer node to send a getblocks request for the next batch of inventory
                if (inv.hash == pfrom->hashContinue)
                {
                    // Bypass PushInventory, this must send even if redundant,
                    // and we want it right after the last block so they don't
                    // wait for other stuff first.
                    std::vector<CInv> vInv;
                    vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
                    connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
                    pfrom->hashContinue.SetNull();
                }
            }
        }
        else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
        {
            // Send stream from relay memory
            bool push = false;
            auto mi = mapRelay.find(inv.hash);
            int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
            if (mi != mapRelay.end()) {
                connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
    std::vector<CInv> vNotFound;
    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
    {
        LOCK(cs_main);
        while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
            if (interruptMsgProc)
                return;
            // Don't bother if send buffer is too full to respond anyway
            if (pfrom->fPauseSend)
                break;
            const CInv &inv = *it;
            it++;
            // Send stream from relay memory
            bool push = false;
            auto mi = mapRelay.find(inv.hash);
            int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
            if (mi != mapRelay.end()) {
                connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
                push = true;
            } else if (pfrom->timeLastMempoolReq) {
                auto txinfo = mempool.info(inv.hash);
                // To protect privacy, do not answer getdata using the mempool when
                // that TX couldn't have been INVed in reply to a MEMPOOL request.
                if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
                    connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
                    push = true;
            } else if (pfrom->timeLastMempoolReq) {
                auto txinfo = mempool.info(inv.hash);
                // To protect privacy, do not answer getdata using the mempool when
                // that TX couldn't have been INVed in reply to a MEMPOOL request.
                if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
                    connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
                    push = true;
                }
            }
            if (!push) {
                vNotFound.push_back(inv);
            }
        }
            if (!push) {
                vNotFound.push_back(inv);
            }
            // Track requests for our stuff.
            GetMainSignals().Inventory(inv.hash);
        }
    } // release cs_main
        if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
            break;
    if (it != pfrom->vRecvGetData.end()) {
        const CInv& inv = *it;
        it++;
        if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) {
            ProcessGetBlockData(pfrom, consensusParams, inv, connman, interruptMsgProc);
        }
    }
@@ -2008,7 +2034,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
        inv.hash = req.blockhash;
        pfrom->vRecvGetData.push_back(inv);
        ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
        // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
        return true;
    }
@@ -2101,7 +2127,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        CInv inv(MSG_TX, tx.GetHash());
        pfrom->AddInventoryKnown(inv);
        LOCK(cs_main);
        LOCK2(cs_main, g_cs_orphans);
        bool fMissingInputs = false;
        CValidationState state;
@@ -2324,7 +2350,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        bool fBlockReconstructed = false;
        {
            LOCK(cs_main);
            LOCK2(cs_main, g_cs_orphans);
            // If AcceptBlockHeader returned true, it set pindex
            assert(pindex);
            UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());