// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2013 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "core.h"
#include "txmempool.h"

#include <boost/circular_buffer.hpp>

using namespace std;

CTxMemPoolEntry::CTxMemPoolEntry()
{
    nHeight = MEMPOOL_HEIGHT;
}

CTxMemPoolEntry::CTxMemPoolEntry(const CTransaction& _tx, int64_t _nFee,
                                 int64_t _nTime, double _dPriority,
                                 unsigned int _nHeight):
    tx(_tx), nFee(_nFee), nTime(_nTime), dPriority(_dPriority), nHeight(_nHeight)
{
    nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
}

CTxMemPoolEntry::CTxMemPoolEntry(const CTxMemPoolEntry& other)
{
    *this = other;
}
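
// GetPriority(): the priority recorded when the entry was created, plus a bonus
// for the blocks that have passed since then. Illustrative numbers (hypothetical,
// for exposition only): an entry with 1,000,000 units of input value and a size
// of 250 bytes gains 1000000 / 250 = 4000 priority for every block its inputs age.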
double
CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const
{
    int64_t nValueIn = tx.GetValueOut()+nFee;
    double deltaPriority = ((double)(currentHeight-nHeight)*nValueIn)/nTxSize;
    double dResult = dPriority + deltaPriority;
    return dResult;
}

//
// Keep track of fee/priority for transactions confirmed within N blocks
//
class CBlockAverage
{
private:
    boost::circular_buffer<CFeeRate> feeSamples;
    boost::circular_buffer<double> prioritySamples;

    template<typename T> std::vector<T> buf2vec(boost::circular_buffer<T> buf) const
    {
        std::vector<T> vec(buf.begin(), buf.end());
        return vec;
    }

public:
    CBlockAverage() : feeSamples(100), prioritySamples(100) { }

    void RecordFee(const CFeeRate& feeRate) {
        feeSamples.push_back(feeRate);
    }

    void RecordPriority(double priority) {
        prioritySamples.push_back(priority);
    }

    size_t FeeSamples() const { return feeSamples.size(); }
    size_t GetFeeSamples(std::vector<CFeeRate>& insertInto) const
    {
        BOOST_FOREACH(const CFeeRate& f, feeSamples)
            insertInto.push_back(f);
        return feeSamples.size();
    }
    size_t PrioritySamples() const { return prioritySamples.size(); }
    size_t GetPrioritySamples(std::vector<double>& insertInto) const
    {
        BOOST_FOREACH(double d, prioritySamples)
            insertInto.push_back(d);
        return prioritySamples.size();
    }

    // Used as a belt-and-suspenders check when reading to detect
    // file corruption
    bool AreSane(const std::vector<CFeeRate>& vecFee, const CFeeRate& minRelayFee)
    {
        BOOST_FOREACH(CFeeRate fee, vecFee)
        {
            if (fee < CFeeRate(0))
                return false;
            if (fee.GetFeePerK() > minRelayFee.GetFeePerK() * 10000)
                return false;
        }
        return true;
    }
    bool AreSane(const std::vector<double>& vecPriority)
    {
        BOOST_FOREACH(double priority, vecPriority)
        {
            if (priority < 0)
                return false;
        }
        return true;
    }

    void Write(CAutoFile& fileout) const
    {
        std::vector<CFeeRate> vecFee = buf2vec(feeSamples);
        fileout << vecFee;
        std::vector<double> vecPriority = buf2vec(prioritySamples);
        fileout << vecPriority;
    }

    void Read(CAutoFile& filein, const CFeeRate& minRelayFee) {
        std::vector<CFeeRate> vecFee;
        filein >> vecFee;
        if (AreSane(vecFee, minRelayFee))
            feeSamples.insert(feeSamples.end(), vecFee.begin(), vecFee.end());
        else
            throw runtime_error("Corrupt fee value in estimates file.");
        std::vector<double> vecPriority;
        filein >> vecPriority;
        if (AreSane(vecPriority))
            prioritySamples.insert(prioritySamples.end(), vecPriority.begin(), vecPriority.end());
        else
            throw runtime_error("Corrupt priority value in estimates file.");
        if (feeSamples.size() + prioritySamples.size() > 0)
            LogPrint("estimatefee", "Read %d fee samples and %d priority samples\n",
                     feeSamples.size(), prioritySamples.size());
    }
};
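
//
// Guess how quickly transactions confirm, given their fee or priority, by
// bucketing confirmed transactions by how many blocks they waited
// (one CBlockAverage per bucket, with the last bucket acting as a catch-all).
//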
class CMinerPolicyEstimator
{
private:
    // Records observed fee/priority averages for transactions that confirmed
    // within one block, two blocks, three blocks, etc.
    std::vector<CBlockAverage> history;
    std::vector<CFeeRate> sortedFeeSamples;
    std::vector<double> sortedPrioritySamples;

    int nBestSeenHeight;

    // nBlocksAgo is 0 based, i.e. transactions that confirmed in the highest seen block are
    // nBlocksAgo == 0, transactions in the block before that are nBlocksAgo == 1 etc.
    void seenTxConfirm(const CFeeRate& feeRate, const CFeeRate& minRelayFee, double dPriority, int nBlocksAgo)
    {
        // Last entry records "everything else".
        int nBlocksTruncated = min(nBlocksAgo, (int) history.size() - 1);
        assert(nBlocksTruncated >= 0);

        // We need to guess why the transaction was included in a block -- either
        // because it is high-priority or because it has sufficient fees.
        bool sufficientFee = (feeRate > minRelayFee);
        bool sufficientPriority = AllowFree(dPriority);
        const char* assignedTo = "unassigned";
        if (sufficientFee && !sufficientPriority)
        {
            history[nBlocksTruncated].RecordFee(feeRate);
            assignedTo = "fee";
        }
        else if (sufficientPriority && !sufficientFee)
        {
            history[nBlocksTruncated].RecordPriority(dPriority);
            assignedTo = "priority";
        }
        else
        {
            // Neither or both fee and priority were sufficient to get confirmed:
            // we don't know why the transaction was included, so don't record it.
        }
        LogPrint("estimatefee", "Seen TX confirm: %s : %s fee/%g priority, took %d blocks\n",
                 assignedTo, feeRate.ToString(), dPriority, nBlocksAgo);
    }

public:
    CMinerPolicyEstimator(int nEntries) : nBestSeenHeight(0)
    {
        history.resize(nEntries);
    }
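
    // Called for each newly connected block (see CTxMemPool::removeForBlock);
    // entries are the mempool entries for the transactions that block confirmed.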
    void seenBlock(const std::vector<CTxMemPoolEntry>& entries, int nBlockHeight, const CFeeRate minRelayFee)
    {
        if (nBlockHeight <= nBestSeenHeight)
        {
            // Ignore side chains and re-orgs; assuming they are random
            // they don't affect the estimate.
            // And if an attacker can re-org the chain at will, then
            // you've got much bigger problems than "attacker can influence
            // transaction fees."
            return;
        }
        nBestSeenHeight = nBlockHeight;

        // Fill up the history buckets based on how long transactions took
        // to confirm.
        std::vector<std::vector<const CTxMemPoolEntry*> > entriesByConfirmations;
        entriesByConfirmations.resize(history.size());
        BOOST_FOREACH(const CTxMemPoolEntry& entry, entries)
        {
            // How many blocks did it take for miners to include this transaction?
            int delta = nBlockHeight - entry.GetHeight();
            if (delta <= 0)
            {
                // A re-org made us lose height; this should only happen if we happen
                // to re-org on a difficulty transition point: very rare!
                continue;
            }
            if ((delta-1) >= (int)history.size())
                delta = history.size(); // Last bucket is catch-all
            entriesByConfirmations[delta-1].push_back(&entry);
        }
        for (size_t i = 0; i < entriesByConfirmations.size(); i++)
        {
            std::vector<const CTxMemPoolEntry*> &e = entriesByConfirmations.at(i);
            // Insert at most 10 random entries per bucket, otherwise a single block
            // can dominate an estimate:
            if (e.size() > 10) {
                std::random_shuffle(e.begin(), e.end());
                e.resize(10);
            }
            BOOST_FOREACH(const CTxMemPoolEntry* entry, e)
            {
                // Fees are stored and reported as BTC-per-kb:
                CFeeRate feeRate(entry->GetFee(), entry->GetTxSize());
                double dPriority = entry->GetPriority(entry->GetHeight()); // Want priority when it went IN
                seenTxConfirm(feeRate, minRelayFee, dPriority, i);
            }
        }

        // After new samples are added, we have to clear the sorted lists,
        // so they'll be re-sorted the next time someone asks for an estimate.
        sortedFeeSamples.clear();
        sortedPrioritySamples.clear();

        for (size_t i = 0; i < history.size(); i++) {
            if (history[i].FeeSamples() + history[i].PrioritySamples() > 0)
                LogPrint("estimatefee", "estimates: for confirming within %d blocks based on %d/%d samples, fee=%s, prio=%g\n",
                         i,
                         history[i].FeeSamples(), history[i].PrioritySamples(),
                         estimateFee(i+1).ToString(), estimatePriority(i+1));
        }
    }
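
    // Worked example of the smoothing below (the numbers are hypothetical): asking
    // for nBlocksToConfirm == 2 with 10 fee samples in bucket 0 and 8 in bucket 1
    // picks sortedFeeSamples[10 + 8/2] == sortedFeeSamples[14], i.e. the 15th
    // highest fee rate seen across all buckets.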
    // Can return CFeeRate(0) if we don't have any data for that many blocks back. nBlocksToConfirm is 1 based.
    CFeeRate estimateFee(int nBlocksToConfirm)
    {
        nBlocksToConfirm--;

        if (nBlocksToConfirm < 0 || nBlocksToConfirm >= (int)history.size())
            return CFeeRate(0);

        if (sortedFeeSamples.size() == 0)
        {
            for (size_t i = 0; i < history.size(); i++)
                history.at(i).GetFeeSamples(sortedFeeSamples);
            std::sort(sortedFeeSamples.begin(), sortedFeeSamples.end(),
                      std::greater<CFeeRate>());
        }
        if (sortedFeeSamples.size() < 11)
        {
            // Eleven is Gavin's Favorite Number
            // ... but we also take a maximum of 10 samples per block, so eleven means
            // we're getting samples from at least two different blocks.
            return CFeeRate(0);
        }

        int nBucketSize = history.at(nBlocksToConfirm).FeeSamples();

        // Estimates should not increase as the number of confirmations goes up,
        // but the estimates are noisy because confirmations happen discretely
        // in blocks. To smooth out the estimates, use all samples in the history
        // and use the nth highest where n is (number of samples in previous buckets +
        // half the samples in the nBlocksToConfirm bucket):
        size_t nPrevSize = 0;
        for (int i = 0; i < nBlocksToConfirm; i++)
            nPrevSize += history.at(i).FeeSamples();
        size_t index = min(nPrevSize + nBucketSize/2, sortedFeeSamples.size()-1);
        return sortedFeeSamples[index];
    }
    double estimatePriority(int nBlocksToConfirm)
    {
        nBlocksToConfirm--;

        if (nBlocksToConfirm < 0 || nBlocksToConfirm >= (int)history.size())
            return -1;

        if (sortedPrioritySamples.size() == 0)
        {
            for (size_t i = 0; i < history.size(); i++)
                history.at(i).GetPrioritySamples(sortedPrioritySamples);
            std::sort(sortedPrioritySamples.begin(), sortedPrioritySamples.end(),
                      std::greater<double>());
        }
        if (sortedPrioritySamples.size() < 11)
            return -1.0;

        int nBucketSize = history.at(nBlocksToConfirm).PrioritySamples();

        // Estimates should not increase as number of confirmations needed goes up,
        // but the estimates are noisy because confirmations happen discretely
        // in blocks. To smooth out the estimates, use all samples in the history
        // and use the nth highest where n is (number of samples in previous buckets +
        // half the samples in nBlocksToConfirm bucket).
        size_t nPrevSize = 0;
        for (int i = 0; i < nBlocksToConfirm; i++)
            nPrevSize += history.at(i).PrioritySamples();
        size_t index = min(nPrevSize + nBucketSize/2, sortedPrioritySamples.size()-1);
        return sortedPrioritySamples[index];
    }

    void Write(CAutoFile& fileout) const
    {
        fileout << nBestSeenHeight;
        fileout << history.size();
        BOOST_FOREACH(const CBlockAverage& entry, history)
        {
            entry.Write(fileout);
        }
    }

    void Read(CAutoFile& filein, const CFeeRate& minRelayFee)
    {
        filein >> nBestSeenHeight;
        size_t numEntries;
        filein >> numEntries;
        history.clear();
        for (size_t i = 0; i < numEntries; i++)
        {
            CBlockAverage entry;
            entry.Read(filein, minRelayFee);
            history.push_back(entry);
        }
    }
};


CTxMemPool::CTxMemPool(const CFeeRate& _minRelayFee) : minRelayFee(_minRelayFee)
{
    // Sanity checks off by default for performance, because otherwise
    // accepting transactions becomes O(N^2) where N is the number
    // of transactions in the pool
    fSanityCheck = false;

    // 25 blocks is a compromise between using a lot of disk/memory and
    // trying to give accurate estimates to people who might be willing
    // to wait a day or two to save a fraction of a penny in fees.
    // Confirmation times for very-low-fee transactions that take more
    // than an hour or three to confirm are highly variable.
    minerPolicyEstimator = new CMinerPolicyEstimator(25);
}

CTxMemPool::~CTxMemPool()
{
    delete minerPolicyEstimator;
}

void CTxMemPool::pruneSpent(const uint256 &hashTx, CCoins &coins)
{
    LOCK(cs);

    std::map<COutPoint, CInPoint>::iterator it = mapNextTx.lower_bound(COutPoint(hashTx, 0));

    // iterate over all COutPoints in mapNextTx whose hash equals the provided hashTx
    while (it != mapNextTx.end() && it->first.hash == hashTx) {
        coins.Spend(it->first.n); // and remove those outputs from coins
        it++;
    }
}

unsigned int CTxMemPool::GetTransactionsUpdated() const
{
    LOCK(cs);
    return nTransactionsUpdated;
}

void CTxMemPool::AddTransactionsUpdated(unsigned int n)
{
    LOCK(cs);
    nTransactionsUpdated += n;
}


bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry)
{
    // Add to memory pool without checking anything.
    // Used by main.cpp AcceptToMemoryPool(), which DOES do
    // all the appropriate checks.
    LOCK(cs);
    {
        mapTx[hash] = entry;
        const CTransaction& tx = mapTx[hash].GetTx();
        for (unsigned int i = 0; i < tx.vin.size(); i++)
            mapNextTx[tx.vin[i].prevout] = CInPoint(&tx, i);
        nTransactionsUpdated++;
    }
    return true;
}


void CTxMemPool::remove(const CTransaction &tx, std::list<CTransaction>& removed, bool fRecursive)
{
    // Remove transaction from memory pool
    {
        LOCK(cs);
        uint256 hash = tx.GetHash();
        if (fRecursive) {
            for (unsigned int i = 0; i < tx.vout.size(); i++) {
                std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(COutPoint(hash, i));
                if (it == mapNextTx.end())
                    continue;
                remove(*it->second.ptx, removed, true);
            }
        }
        if (mapTx.count(hash))
        {
            removed.push_front(tx);
            BOOST_FOREACH(const CTxIn& txin, tx.vin)
                mapNextTx.erase(txin.prevout);
            mapTx.erase(hash);
            nTransactionsUpdated++;
        }
    }
}

void CTxMemPool::removeConflicts(const CTransaction &tx, std::list<CTransaction>& removed)
{
    // Remove transactions which depend on inputs of tx, recursively
    LOCK(cs);
    BOOST_FOREACH(const CTxIn &txin, tx.vin) {
        std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(txin.prevout);
        if (it != mapNextTx.end()) {
            const CTransaction &txConflict = *it->second.ptx;
            if (txConflict != tx)
            {
                remove(txConflict, removed, true);
            }
        }
    }
}

// Called when a block is connected. Removes from mempool and updates the miner fee estimator.
void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight,
                                std::list<CTransaction>& conflicts)
{
    LOCK(cs);
    std::vector<CTxMemPoolEntry> entries;
    BOOST_FOREACH(const CTransaction& tx, vtx)
    {
        uint256 hash = tx.GetHash();
        if (mapTx.count(hash))
            entries.push_back(mapTx[hash]);
    }
    minerPolicyEstimator->seenBlock(entries, nBlockHeight, minRelayFee);
    BOOST_FOREACH(const CTransaction& tx, vtx)
    {
        std::list<CTransaction> dummy;
        remove(tx, dummy, false);
        removeConflicts(tx, conflicts);
        ClearPrioritisation(tx.GetHash());
    }
}


void CTxMemPool::clear()
{
    LOCK(cs);
    mapTx.clear();
    mapNextTx.clear();
    ++nTransactionsUpdated;
}

void CTxMemPool::check(CCoinsViewCache *pcoins) const
{
    if (!fSanityCheck)
        return;

    LogPrint("mempool", "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());

    LOCK(cs);
    for (std::map<uint256, CTxMemPoolEntry>::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
        unsigned int i = 0;
        const CTransaction& tx = it->second.GetTx();
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            // Check that every mempool transaction's inputs refer to available coins, or other mempool tx's.
            std::map<uint256, CTxMemPoolEntry>::const_iterator it2 = mapTx.find(txin.prevout.hash);
            if (it2 != mapTx.end()) {
                const CTransaction& tx2 = it2->second.GetTx();
                assert(tx2.vout.size() > txin.prevout.n && !tx2.vout[txin.prevout.n].IsNull());
            } else {
                CCoins &coins = pcoins->GetCoins(txin.prevout.hash);
                assert(coins.IsAvailable(txin.prevout.n));
            }
            // Check whether its inputs are marked in mapNextTx.
            std::map<COutPoint, CInPoint>::const_iterator it3 = mapNextTx.find(txin.prevout);
            assert(it3 != mapNextTx.end());
            assert(it3->second.ptx == &tx);
            assert(it3->second.n == i);
            i++;
        }
    }
    // Check that each entry in mapNextTx points back to a mempool transaction
    // that really spends that outpoint.
    for (std::map<COutPoint, CInPoint>::const_iterator it = mapNextTx.begin(); it != mapNextTx.end(); it++) {
        uint256 hash = it->second.ptx->GetHash();
        map<uint256, CTxMemPoolEntry>::const_iterator it2 = mapTx.find(hash);
        // Check the iterator is valid before dereferencing it.
        assert(it2 != mapTx.end());
        const CTransaction& tx = it2->second.GetTx();
        assert(&tx == it->second.ptx);
        assert(tx.vin.size() > it->second.n);
        assert(it->first == it->second.ptx->vin[it->second.n].prevout);
    }
}

void CTxMemPool::queryHashes(vector<uint256>& vtxid)
{
    vtxid.clear();

    LOCK(cs);
    vtxid.reserve(mapTx.size());
    for (map<uint256, CTxMemPoolEntry>::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi)
        vtxid.push_back((*mi).first);
}

bool CTxMemPool::lookup(uint256 hash, CTransaction& result) const
{
    LOCK(cs);
    map<uint256, CTxMemPoolEntry>::const_iterator i = mapTx.find(hash);
    if (i == mapTx.end()) return false;
    result = i->second.GetTx();
    return true;
}

CFeeRate CTxMemPool::estimateFee(int nBlocks) const
{
    LOCK(cs);
    return minerPolicyEstimator->estimateFee(nBlocks);
}
double CTxMemPool::estimatePriority(int nBlocks) const
{
    LOCK(cs);
    return minerPolicyEstimator->estimatePriority(nBlocks);
}
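
// The fee estimates file written below contains: the minimum client version
// required to read it, the CLIENT_VERSION that wrote it, and the serialized
// policy estimator state (best seen height, number of buckets, and each
// bucket's fee and priority samples).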
bool
CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const
{
    try {
        LOCK(cs);
        fileout << 99900; // version required to read: 0.9.99 or later
        fileout << CLIENT_VERSION; // version that wrote the file
        minerPolicyEstimator->Write(fileout);
    }
    catch (std::exception &e) {
        LogPrintf("CTxMemPool::WriteFeeEstimates() : unable to write policy estimator data (non-fatal)\n");
        return false;
    }
    return true;
}

bool
CTxMemPool::ReadFeeEstimates(CAutoFile& filein)
{
    try {
        int nVersionRequired, nVersionThatWrote;
        filein >> nVersionRequired >> nVersionThatWrote;
        if (nVersionRequired > CLIENT_VERSION)
            return error("CTxMemPool::ReadFeeEstimates() : up-version (%d) fee estimate file", nVersionRequired);

        LOCK(cs);
        minerPolicyEstimator->Read(filein, minRelayFee);
    }
    catch (std::exception &e) {
        LogPrintf("CTxMemPool::ReadFeeEstimates() : unable to read policy estimator data (non-fatal)\n");
        return false;
    }
    return true;
}
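
// Record a priority/fee adjustment for hash; ApplyDeltas() folds the accumulated
// deltas back in wherever the transaction is evaluated (e.g. when selecting
// transactions for a new block).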
void CTxMemPool::PrioritiseTransaction(const uint256 hash, const string strHash, double dPriorityDelta, int64_t nFeeDelta)
{
    {
        LOCK(cs);
        std::pair<double, int64_t> &deltas = mapDeltas[hash];
        deltas.first += dPriorityDelta;
        deltas.second += nFeeDelta;
    }
    LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", strHash.c_str(), dPriorityDelta, nFeeDelta);
}

void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta, int64_t &nFeeDelta)
{
    LOCK(cs);
    std::map<uint256, std::pair<double, int64_t> >::iterator pos = mapDeltas.find(hash);
    if (pos == mapDeltas.end())
        return;
    const std::pair<double, int64_t> &deltas = pos->second;
    dPriorityDelta += deltas.first;
    nFeeDelta += deltas.second;
}

void CTxMemPool::ClearPrioritisation(const uint256 hash)
{
    LOCK(cs);
    mapDeltas.erase(hash);
}
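

// A coins view that overlays the transactions in a mempool on top of a backing
// CCoinsView, so that outputs created by in-mempool transactions are visible
// as spendable coins.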
CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView &baseIn, CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) { }

bool CCoinsViewMemPool::GetCoins(const uint256 &txid, CCoins &coins) {
    // If an entry in the mempool exists, always return that one, as it's guaranteed to never
    // conflict with the underlying cache, and it cannot have pruned entries (as it contains
    // full transactions). First checking the underlying cache risks returning a pruned entry instead.
    CTransaction tx;
    if (mempool.lookup(txid, tx)) {
        coins = CCoins(tx, MEMPOOL_HEIGHT);
        return true;
    }
    return (base->GetCoins(txid, coins) && !coins.IsPruned());
}

bool CCoinsViewMemPool::HaveCoins(const uint256 &txid) {
    return mempool.exists(txid) || base->HaveCoins(txid);
}