Browse Source

Eliminate TX trickle bypass, sort TX invs for privacy and priority.

Previously Bitcoin would send 1/4 of transactions out to all peers
 instantly.  This causes high overhead because it makes >80% of
 INVs size 1.  Doing so harms privacy, because it limits the
 amount of source obscurity a transaction can receive.

These randomized broadcasts also disobeyed transaction dependencies
 and required use of the orphan pool.  Because the orphan pool is
 so small this leads to poor propagation for dependent transactions.

When the bypass wasn't in effect, transactions were sent in the
 order they were received.  This avoided creating orphans but
 undermines privacy fairly significantly.

This commit:
 Eliminates the bypass. The bypass is replaced by halving the
  average delay for outbound peers.

 Sorts candidate transactions for INV by their topological
  depth then by their feerate (then hash); removing the
  information leakage and providing priority service to
  higher fee transactions.

 Limits the amount of transactions sent in a single INV to
  7tx/sec (and twice that for outbound); this limits the
  harm of low fee transaction floods, gives faster relay
  service to higher fee transactions. The 7 sounds lower
  than it really is because received advertisements need
 not be sent, and because the aggregate rate is multiplied
  by the number of peers.
0.13
Gregory Maxwell 9 years ago committed by Pieter Wuille
parent
commit
f2d3ba7386
  1. 56
      src/main.cpp
  2. 9
      src/main.h
  3. 15
      src/txmempool.cpp
  4. 1
      src/txmempool.h

56
src/main.cpp

@ -5560,6 +5560,29 @@ bool ProcessMessages(CNode* pfrom)
return fOk; return fOk;
} }
/** Comparator used to sort the per-peer inventory announcement queue.
 *
 *  Ordering (ascending / "sorts first"):
 *   - non-MSG_TX entries (e.g. blocks) sort before transactions, and are
 *     mutually unordered (comparator returns false for two non-tx items);
 *   - two transactions are ordered by the mempool's depth-and-score rule
 *     (see CTxMemPool::CompareDepthAndScore): fewer in-mempool ancestors
 *     first, ties broken by score, so parents are announced before their
 *     children.
 *
 *  Must implement a strict weak ordering and be usable as a const
 *  comparator by std::stable_sort.
 */
class CompareInvMempoolOrder
{
    CTxMemPool* mp;
public:
    explicit CompareInvMempoolOrder(CTxMemPool* mempool) : mp(mempool) {}

    bool operator()(const CInv& a, const CInv& b) const
    {
        // Nothing sorts before a non-tx entry; two non-tx entries are
        // considered equivalent (comparator returns false either way).
        if (b.type != MSG_TX) {
            return false;
        }
        // b is a tx: any non-tx a sorts before it.
        if (a.type != MSG_TX) {
            return true;
        }
        // Both are transactions: defer to the mempool's topological/score order.
        return mp->CompareDepthAndScore(a.hash, b.hash);
    }
};
bool SendMessages(CNode* pto) bool SendMessages(CNode* pto)
{ {
@ -5790,42 +5813,31 @@ bool SendMessages(CNode* pto)
bool fSendTrickle = pto->fWhitelisted; bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) { if (pto->nNextInvSend < nNow) {
fSendTrickle = true; fSendTrickle = true;
pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL); // Use half the delay for outbound peers, as there is less privacy concern for them.
pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
} }
LOCK(pto->cs_inventory); LOCK(pto->cs_inventory);
vInv.reserve(std::min<size_t>(1000, pto->vInventoryToSend.size())); if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
// Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder);
}
vInv.reserve(std::min<size_t>(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size()));
vInvWait.reserve(pto->vInventoryToSend.size()); vInvWait.reserve(pto->vInventoryToSend.size());
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend) BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
{ {
if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash)) if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
continue; continue;
// No reason to drain out at many times the network's capacity,
// trickle out tx inv to protect privacy // especially since we have many peers and some will draw much shorter delays.
if (inv.type == MSG_TX && !fSendTrickle) if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) {
{
// 1/4 of tx invs blast to all immediately
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
if (fTrickleWait)
{
vInvWait.push_back(inv); vInvWait.push_back(inv);
continue; continue;
} }
}
pto->filterInventoryKnown.insert(inv.hash); pto->filterInventoryKnown.insert(inv.hash);
vInv.push_back(inv); vInv.push_back(inv);
if (vInv.size() >= 1000)
{
pto->PushMessage(NetMsgType::INV, vInv);
vInv.clear();
}
} }
pto->vInventoryToSend = vInvWait; pto->vInventoryToSend = vInvWait;
} }

9
src/main.h

@ -99,9 +99,12 @@ static const unsigned int MAX_REJECT_MESSAGE_LENGTH = 111;
static const unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 24 * 60; static const unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 24 * 60;
/** Average delay between peer address broadcasts in seconds. */ /** Average delay between peer address broadcasts in seconds. */
static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30; static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
/** Average delay between trickled inventory broadcasts in seconds. /** Average delay between trickled inventory transmissions in seconds.
* Blocks, whitelisted receivers, and a random 25% of transactions bypass this. */ * Blocks and whitelisted receivers bypass this, outbound peers get half this delay. */
static const unsigned int AVG_INVENTORY_BROADCAST_INTERVAL = 5; static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
/** Maximum number of inventory items to send per transmission.
* Limits the impact of low-fee transaction floods. */
static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;
/** Average delay between feefilter broadcasts in seconds. */ /** Average delay between feefilter broadcasts in seconds. */
static const unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60; static const unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
/** Maximum feefilter broadcast delay after significant change. */ /** Maximum feefilter broadcast delay after significant change. */

15
src/txmempool.cpp

@ -752,6 +752,21 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
assert(innerUsage == cachedInnerUsage); assert(innerUsage == cachedInnerUsage);
} }
bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb)
{
LOCK(cs);
indexed_transaction_set::const_iterator i = mapTx.find(hasha);
if (i == mapTx.end()) return false;
indexed_transaction_set::const_iterator j = mapTx.find(hashb);
if (j == mapTx.end()) return true;
uint64_t counta = i->GetCountWithAncestors();
uint64_t countb = j->GetCountWithAncestors();
if (counta == countb) {
return CompareTxMemPoolEntryByScore()(*i, *j);
}
return counta < countb;
}
void CTxMemPool::queryHashes(vector<uint256>& vtxid) void CTxMemPool::queryHashes(vector<uint256>& vtxid)
{ {
vtxid.clear(); vtxid.clear();

1
src/txmempool.h

@ -511,6 +511,7 @@ public:
std::list<CTransaction>& conflicts, bool fCurrentEstimate = true); std::list<CTransaction>& conflicts, bool fCurrentEstimate = true);
void clear(); void clear();
void _clear(); //lock free void _clear(); //lock free
bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb);
void queryHashes(std::vector<uint256>& vtxid); void queryHashes(std::vector<uint256>& vtxid);
void pruneSpent(const uint256& hash, CCoins &coins); void pruneSpent(const uint256& hash, CCoins &coins);
unsigned int GetTransactionsUpdated() const; unsigned int GetTransactionsUpdated() const;

Loading…
Cancel
Save