
Split up and optimize transaction and block inv queues

Branch: 0.13
Author: Pieter Wuille
Commit: dc13dcd2be
Changed files:
  1. src/main.cpp (76 lines changed)
  2. src/net.h (20 lines changed)

src/main.cpp

@@ -5569,18 +5569,11 @@ public:
         mp = mempool;
     }
 
-    bool operator()(const CInv &a, const CInv &b)
+    bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
     {
-        if (a.type != MSG_TX && b.type != MSG_TX) {
-            return false;
-        } else {
-            if (a.type != MSG_TX) {
-                return true;
-            } else if (b.type != MSG_TX) {
-                return false;
-            }
-            return mp->CompareDepthAndScore(a.hash, b.hash);
-        }
+        /* As std::make_heap produces a max-heap, we want the entries with the
+         * fewest ancestors/highest fee to sort later. */
+        return mp->CompareDepthAndScore(*b, *a);
     }
 };
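The new comparator is what lets std::make_heap act as a best-first queue: std::make_heap builds a max-heap, so by comparing *b against *a the entry with the fewest ancestors and highest fee compares as the "largest" element and is popped first. A minimal standalone sketch of the same trick (illustrative only, not Bitcoin Core code; the Tx struct and the Better() ordering are assumptions):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Tx { int ancestors; double feerate; };

// "a is better than b": fewer unconfirmed ancestors, then higher feerate.
static bool Better(const Tx& a, const Tx& b)
{
    if (a.ancestors != b.ancestors) return a.ancestors < b.ancestors;
    return a.feerate > b.feerate;
}

int main()
{
    std::vector<Tx> txs{{2, 1.0}, {0, 5.0}, {0, 9.0}, {1, 3.0}};

    // Invert the comparison, as CompareInvMempoolOrder does: the best entry then
    // compares as the largest element of the max-heap and surfaces first.
    auto cmp = [](const Tx& a, const Tx& b) { return Better(b, a); };
    std::make_heap(txs.begin(), txs.end(), cmp);

    // Pop best-first; stopping early leaves the tail unsorted, which is the point.
    while (!txs.empty()) {
        std::pop_heap(txs.begin(), txs.end(), cmp);
        Tx best = txs.back();
        txs.pop_back();
        std::printf("ancestors=%d feerate=%.1f\n", best.ancestors, best.feerate);
    }
    return 0;
}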
@@ -5808,38 +5801,59 @@ bool SendMessages(CNode* pto)
         //
         // Message: inventory
         //
         vector<CInv> vInv;
-        vector<CInv> vInvWait;
         {
+            LOCK(pto->cs_inventory);
+            vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
+            // Add blocks
+            BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
+                vInv.push_back(CInv(MSG_BLOCK, hash));
+                if (vInv.size() == MAX_INV_SZ) {
+                    pto->PushMessage(NetMsgType::INV, vInv);
+                    vInv.clear();
+                }
+            }
+            pto->vInventoryBlockToSend.clear();
+
+            // Determine transactions to relay
             bool fSendTrickle = pto->fWhitelisted;
             if (pto->nNextInvSend < nNow) {
                 fSendTrickle = true;
-                // Use half the delay for outbound peers, as their is less privacy concern for them.
+                // Use half the delay for outbound peers, as there is less privacy concern for them.
                 pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
             }
-            LOCK(pto->cs_inventory);
-            if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
+            if (fSendTrickle) {
+                // Produce a vector with all candidates for sending
+                vector<std::set<uint256>::iterator> vInvTx;
+                vInvTx.reserve(pto->setInventoryTxToSend.size());
+                for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
+                    vInvTx.push_back(it);
+                }
                 // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
+                // A heap is used so that not all items need sorting if only a few are being sent.
                 CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
-                std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder);
-            }
-            vInv.reserve(std::min<size_t>(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size()));
-            vInvWait.reserve(pto->vInventoryToSend.size());
-            BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
-            {
-                if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
-                    continue;
-
+                std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                 // No reason to drain out at many times the network's capacity,
                 // especially since we have many peers and some will draw much shorter delays.
-                if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) {
-                    vInvWait.push_back(inv);
-                    continue;
+                unsigned int nRelayedTransactions = 0;
+                while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
+                    // Fetch the top element from the heap
+                    std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
+                    std::set<uint256>::iterator it = vInvTx.back();
+                    vInvTx.pop_back();
+                    uint256 hash = *it;
+                    // Remove it from the to-be-sent set
+                    pto->setInventoryTxToSend.erase(it);
+                    // Check if not in the filter already
+                    if (pto->filterInventoryKnown.contains(hash)) {
+                        continue;
+                    }
+                    // Send
+                    vInv.push_back(CInv(MSG_TX, hash));
+                    nRelayedTransactions++;
+                    pto->filterInventoryKnown.insert(hash);
                 }
-                pto->filterInventoryKnown.insert(inv.hash);
-                vInv.push_back(inv);
             }
-            pto->vInventoryToSend = vInvWait;
         }
         if (!vInv.empty())
            pto->PushMessage(NetMsgType::INV, vInv);
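The trickle logic above only announces transactions when the per-peer timer fires, and `INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound` halves the average interval for outbound peers (shift by one when fInbound is false). A rough standalone sketch of that idea, not the actual PoissonNextSend; the 5-second constant and the helper name below are assumptions for illustration:

#include <cstdint>
#include <cstdio>
#include <random>

static const int INVENTORY_BROADCAST_INTERVAL = 5; // assumed average trickle interval, in seconds

// Draw the next announcement time from an exponential distribution so that
// trickle events form a Poisson process; outbound peers get half the mean delay.
int64_t NextInvSendMicros(int64_t now_micros, bool inbound, std::mt19937_64& rng)
{
    int avg_seconds = INVENTORY_BROADCAST_INTERVAL >> !inbound; // ">> 1" only for outbound
    std::exponential_distribution<double> delay(1.0 / avg_seconds);
    return now_micros + static_cast<int64_t>(delay(rng) * 1000000.0 + 0.5);
}

int main()
{
    std::mt19937_64 rng(42);
    int64_t now = 0;
    std::printf("inbound : +%lld us\n", (long long)(NextInvSendMicros(now, true, rng) - now));
    std::printf("outbound: +%lld us\n", (long long)(NextInvSendMicros(now, false, rng) - now));
    return 0;
}

Randomizing each peer's interval independently, rather than announcing on a shared tick, is what makes it harder for an observer to work out which peer first accepted a transaction.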

src/net.h

@@ -397,7 +397,13 @@ public:
     // inventory based relay
     CRollingBloomFilter filterInventoryKnown;
-    std::vector<CInv> vInventoryToSend;
+    // Set of transaction ids we still have to announce.
+    // They are sorted by the mempool before relay, so the order is not important.
+    std::set<uint256> setInventoryTxToSend;
+    // List of block ids we still have announce.
+    // There is no final sorting before sending, as they are always sent immediately
+    // and in the order requested.
+    std::vector<uint256> vInventoryBlockToSend;
     CCriticalSection cs_inventory;
     std::set<uint256> setAskFor;
     std::multimap<int64_t, CInv> mapAskFor;
@@ -517,11 +523,13 @@ public:
     void PushInventory(const CInv& inv)
     {
-        {
-            LOCK(cs_inventory);
-            if (inv.type == MSG_TX && filterInventoryKnown.contains(inv.hash))
-                return;
-            vInventoryToSend.push_back(inv);
+        LOCK(cs_inventory);
+        if (inv.type == MSG_TX) {
+            if (!filterInventoryKnown.contains(inv.hash)) {
+                setInventoryTxToSend.insert(inv.hash);
+            }
+        } else if (inv.type == MSG_BLOCK) {
+            vInventoryBlockToSend.push_back(inv.hash);
         }
     }
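Taken together, the net.h changes are the data-structure split the new SendMessages loop relies on: a std::set for transaction ids, which deduplicates repeated announcements and leaves the final ordering to the mempool comparator at send time, and a std::vector for block hashes, which are announced immediately and in the order they were pushed. A hedged sketch of that design choice (the struct and names below are illustrative, not the CNode members):

#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct RelayQueues {
    std::set<std::string> tx_to_send;        // unordered; sorted by mempool criteria at send time
    std::vector<std::string> block_to_send;  // kept in push order; flushed immediately

    // Route an announcement by type, mirroring what PushInventory now does.
    void PushTx(const std::string& hash, bool already_known)
    {
        if (!already_known) tx_to_send.insert(hash); // set insertion also deduplicates
    }
    void PushBlock(const std::string& hash)
    {
        block_to_send.push_back(hash);
    }
};

int main()
{
    RelayQueues q;
    q.PushTx("txA", false);
    q.PushTx("txA", false);   // duplicate announcement: the set keeps a single copy
    q.PushBlock("block1");
    q.PushBlock("block2");
    std::printf("txs queued: %zu, blocks queued: %zu\n", q.tx_to_send.size(), q.block_to_send.size());
    return 0;
}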
