
Merge #7846: Clean up lockorder data of destroyed mutexes

5eeb913 Clean up lockorder data of destroyed mutexes (Pieter Wuille)
0.13
Wladimir J. van der Laan 9 years ago
parent / commit 491171f929
  1. src/sync.cpp (55 lines changed)
  2. src/sync.h (33 lines changed)
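Context for the diff below: the lock-order checker keys its bookkeeping on raw mutex addresses, so when a CCriticalSection is destroyed its entries linger, growing the tables and potentially being attributed to an unrelated mutex that later reuses the same address. The standalone sketch below (illustrative names only, not part of the commit) shows how readily an address gets reused:

    // address_reuse_sketch.cpp -- standalone illustration, not from the commit.
    // A freshly allocated mutex may land at the address of a mutex that was
    // just destroyed, which is why stale per-address lockorder data must be purged.
    #include <cstdio>
    #include <boost/thread/recursive_mutex.hpp>

    int main()
    {
        boost::recursive_mutex* a = new boost::recursive_mutex;
        void* old_addr = static_cast<void*>(a);
        delete a; // any lockorder entries keyed on old_addr are now stale
        boost::recursive_mutex* b = new boost::recursive_mutex;
        // The allocator is free to hand back the same storage; if it does,
        // stale entries for 'a' would be blamed on the unrelated mutex 'b'.
        std::printf("address reused: %s\n",
                    old_addr == static_cast<void*>(b) ? "yes" : "no");
        delete b;
        return 0;
    }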

src/sync.cpp (55 lines changed)

@@ -56,11 +56,24 @@ private:
 };
 
 typedef std::vector<std::pair<void*, CLockLocation> > LockStack;
+typedef std::map<std::pair<void*, void*>, LockStack> LockOrders;
+typedef std::set<std::pair<void*, void*> > InvLockOrders;
 
-static boost::mutex dd_mutex;
-static std::map<std::pair<void*, void*>, LockStack> lockorders;
-static boost::thread_specific_ptr<LockStack> lockstack;
+struct LockData {
+    // Very ugly hack: as the global constructs and destructors run single
+    // threaded, we use this boolean to know whether LockData still exists,
+    // as DeleteLock can get called by global CCriticalSection destructors
+    // after LockData disappears.
+    bool available;
+    LockData() : available(true) {}
+    ~LockData() { available = false; }
+
+    LockOrders lockorders;
+    InvLockOrders invlockorders;
+    boost::mutex dd_mutex;
+} static lockdata;
+
+boost::thread_specific_ptr<LockStack> lockstack;
 
 static void potential_deadlock_detected(const std::pair<void*, void*>& mismatch, const LockStack& s1, const LockStack& s2)
 {
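The "available" flag above deals with static destruction order: per the comment, DeleteLock can still be called by global CCriticalSection destructors after the lockdata object is gone, and since global destructors run single-threaded a plain bool is enough to detect that. A minimal standalone sketch of the same trick, with simplified, illustrative names:

    // static_order_sketch.cpp -- simplified illustration of the flag above.
    #include <cstdio>

    struct Registry {
        bool available;
        Registry() : available(true) {}
        ~Registry() { available = false; } // flipped when the registry dies
        void erase(void* p) { std::printf("erasing entries for %p\n", p); }
    } static registry;

    struct TrackedLock {
        ~TrackedLock() {
            // Global destructors run single-threaded, so checking the plain
            // bool suffices; skip the cleanup if the registry is already gone.
            if (registry.available)
                registry.erase(static_cast<void*>(this));
        }
    };

    // A TrackedLock with static storage duration in another translation unit
    // could outlive 'registry'; the flag makes that late call a no-op.
    static TrackedLock g_lock;

    int main() { return 0; }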
@@ -117,7 +130,7 @@ static void push_lock(void* c, const CLockLocation& locklocation, bool fTry)
     if (lockstack.get() == NULL)
         lockstack.reset(new LockStack);
 
-    dd_mutex.lock();
+    boost::unique_lock<boost::mutex> lock(lockdata.dd_mutex);
 
     (*lockstack).push_back(std::make_pair(c, locklocation));
@@ -127,23 +140,21 @@ static void push_lock(void* c, const CLockLocation& locklocation, bool fTry)
                 break;
 
             std::pair<void*, void*> p1 = std::make_pair(i.first, c);
-            if (lockorders.count(p1))
+            if (lockdata.lockorders.count(p1))
                 continue;
-            lockorders[p1] = (*lockstack);
+            lockdata.lockorders[p1] = (*lockstack);
 
             std::pair<void*, void*> p2 = std::make_pair(c, i.first);
-            if (lockorders.count(p2))
-                potential_deadlock_detected(p1, lockorders[p2], lockorders[p1]);
+            lockdata.invlockorders.insert(p2);
+            if (lockdata.lockorders.count(p2))
+                potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
         }
     }
-    dd_mutex.unlock();
 }
 
 static void pop_lock()
 {
-    dd_mutex.lock();
     (*lockstack).pop_back();
-    dd_mutex.unlock();
 }
 
 void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry)
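Two smaller changes ride along in push_lock and pop_lock: the manual dd_mutex.lock()/unlock() pair becomes a scoped boost::unique_lock on lockdata.dd_mutex, and pop_lock drops the global mutex entirely since it only touches the thread-local stack. A short standalone sketch (illustrative names) of why the scoped guard is the safer style:

    // raii_lock_sketch.cpp -- illustrative only; shows the guard style used above.
    #include <boost/thread/mutex.hpp>
    #include <boost/thread/locks.hpp>

    static boost::mutex g_mutex;
    static int g_counter = 0;

    void bump(bool do_work)
    {
        boost::unique_lock<boost::mutex> lock(g_mutex); // acquired here
        if (!do_work)
            return; // released automatically; no unlock() to forget
        ++g_counter;
    }   // ...and released on the normal exit path as well

    int main()
    {
        bump(true);
        bump(false);
        return 0;
    }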
@@ -173,4 +184,26 @@ void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine,
     abort();
 }
 
+void DeleteLock(void* cs)
+{
+    if (!lockdata.available) {
+        // We're already shutting down.
+        return;
+    }
+    boost::unique_lock<boost::mutex> lock(lockdata.dd_mutex);
+    std::pair<void*, void*> item = std::make_pair(cs, (void*)0);
+    LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
+    while (it != lockdata.lockorders.end() && it->first.first == cs) {
+        std::pair<void*, void*> invitem = std::make_pair(it->first.second, it->first.first);
+        lockdata.invlockorders.erase(invitem);
+        lockdata.lockorders.erase(it++);
+    }
+    InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
+    while (invit != lockdata.invlockorders.end() && invit->first == cs) {
+        std::pair<void*, void*> invinvitem = std::make_pair(invit->second, invit->first);
+        lockdata.lockorders.erase(invinvitem);
+        lockdata.invlockorders.erase(invit++);
+    }
+}
+
 #endif /* DEBUG_LOCKORDER */
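The new DeleteLock relies on both containers being ordered: pairs whose first element is cs form a contiguous range, so lower_bound(std::make_pair(cs, (void*)0)) finds every (cs, *) entry in lockorders, and the invlockorders mirror set does the same for (*, cs) pairs. A simplified standalone sketch of that two-index cleanup (illustrative names, not the actual sync.cpp):

    // two_index_cleanup_sketch.cpp -- simplified model of DeleteLock above.
    #include <map>
    #include <set>
    #include <string>
    #include <utility>

    typedef std::map<std::pair<void*, void*>, std::string> Orders; // (a, b) -> info
    typedef std::set<std::pair<void*, void*> > InvOrders;          // (b, a) mirror

    static Orders orders;
    static InvOrders invorders;

    void record(void* a, void* b, const std::string& info)
    {
        orders[std::make_pair(a, b)] = info;
        invorders.insert(std::make_pair(b, a));
    }

    void forget(void* cs)
    {
        std::pair<void*, void*> item = std::make_pair(cs, (void*)0);
        // Pairs where cs is the first element are contiguous in 'orders'.
        Orders::iterator it = orders.lower_bound(item);
        while (it != orders.end() && it->first.first == cs) {
            invorders.erase(std::make_pair(it->first.second, it->first.first));
            orders.erase(it++);
        }
        // Pairs where cs is the second element are contiguous in the mirror.
        InvOrders::iterator invit = invorders.lower_bound(item);
        while (invit != invorders.end() && invit->first == cs) {
            orders.erase(std::make_pair(invit->second, invit->first));
            invorders.erase(invit++);
        }
    }

    int main()
    {
        int x, y;
        record(&x, &y, "x locked before y");
        forget(&x); // removes the entry from both indexes
        return 0;
    }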

src/sync.h (33 lines changed)

@@ -71,30 +71,39 @@ public:
     }
 };
 
-/**
- * Wrapped boost mutex: supports recursive locking, but no waiting
- * TODO: We should move away from using the recursive lock by default.
- */
-typedef AnnotatedMixin<boost::recursive_mutex> CCriticalSection;
-
-/** Wrapped boost mutex: supports waiting but not recursive locking */
-typedef AnnotatedMixin<boost::mutex> CWaitableCriticalSection;
-
-/** Just a typedef for boost::condition_variable, can be wrapped later if desired */
-typedef boost::condition_variable CConditionVariable;
-
 #ifdef DEBUG_LOCKORDER
 void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false);
 void LeaveCritical();
 std::string LocksHeld();
 void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
+void DeleteLock(void* cs);
 #else
 void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
 void static inline LeaveCritical() {}
 void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
+void static inline DeleteLock(void* cs) {}
 #endif
 
 #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
 
+/**
+ * Wrapped boost mutex: supports recursive locking, but no waiting
+ * TODO: We should move away from using the recursive lock by default.
+ */
+class CCriticalSection : public AnnotatedMixin<boost::recursive_mutex>
+{
+public:
+    ~CCriticalSection() {
+        DeleteLock((void*)this);
+    }
+};
+
+typedef CCriticalSection CDynamicCriticalSection;
+
+/** Wrapped boost mutex: supports waiting but not recursive locking */
+typedef AnnotatedMixin<boost::mutex> CWaitableCriticalSection;
+
+/** Just a typedef for boost::condition_variable, can be wrapped later if desired */
+typedef boost::condition_variable CConditionVariable;
+
 #ifdef DEBUG_LOCKCONTENTION
 void PrintLockContention(const char* pszName, const char* pszFile, int nLine);
 #endif
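On the header side, CCriticalSection changes from a plain typedef into a class whose destructor calls DeleteLock, with CDynamicCriticalSection added as a typedef for it, so under DEBUG_LOCKORDER every critical section purges its lockorder data when it is destroyed. A standalone sketch of that destructor-deregistration pattern (simplified names, stubbed DeleteLock, not the real header):

    // destructor_deregistration_sketch.cpp -- illustrative only.
    #include <cstdio>
    #include <boost/thread/recursive_mutex.hpp>

    // Stand-in for DeleteLock(); the real one erases lockorder entries for cs.
    void DeleteLockStub(void* cs) { std::printf("cleaning up %p\n", cs); }

    class TrackedCriticalSection : public boost::recursive_mutex
    {
    public:
        ~TrackedCriticalSection() {
            DeleteLockStub((void*)this); // mirrors CCriticalSection::~CCriticalSection
        }
    };

    int main()
    {
        // A lock with dynamic lifetime: its bookkeeping disappears with it, so
        // a later allocation reusing this address starts with a clean slate.
        TrackedCriticalSection* cs = new TrackedCriticalSection;
        cs->lock();
        cs->unlock();
        delete cs; // destructor runs the cleanup
        return 0;
    }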
