
Clean up lockorder data of destroyed mutexes

The lockorder potential-deadlock detection works by remembering, for each
lock A that is acquired while another lock B is held, the pair (A,B), and
triggering a warning when (B,A) already exists in the table.

A and B in the above text are represented by pointers to the CCriticalSection
object that is acquired. This does mean, however, that we need to clean up the
table entries that refer to any critical section which is destroyed, as its
memory address can potentially be reused for another, unrelated lock in the future.

Implement this cleanup by remembering the pairs not only in the forward
direction, but also in the backward direction. This allows fast iteration over
all pairs that use a deleted CCriticalSection in either the first or the second position.
Branch: 0.13
Pieter Wuille, 9 years ago
Commit: 5eeb913d6c

Changed files:
  1. src/sync.cpp (55 lines changed)
  2. src/sync.h (33 lines changed)
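
The commit message above describes two pieces of bookkeeping: the existing table recording the order in which pairs of locks have been taken, and a new inverse index that makes it cheap to purge every pair mentioning a destroyed lock. Below is a minimal, self-contained sketch of that idea, with invented names (RecordOrder, ForgetLock) and none of the real code's mutex protection or per-thread lock stacks; the actual implementation is in the src/sync.cpp diff further down.

    #include <cstdio>
    #include <set>
    #include <utility>

    typedef std::pair<void*, void*> LockPair;

    static std::set<LockPair> forward; // (lock already held, lock being acquired)
    static std::set<LockPair> inverse; // the same pairs with the elements swapped

    // Record that 'acquired' was taken while 'held' was already held, and warn
    // if the opposite order has been observed before (a potential deadlock).
    void RecordOrder(void* held, void* acquired)
    {
        if (forward.count(std::make_pair(acquired, held)))
            std::fprintf(stderr, "potential deadlock: conflicting lock order\n");
        forward.insert(std::make_pair(held, acquired));
        inverse.insert(std::make_pair(acquired, held));
    }

    // Erase every pair that mentions a destroyed lock, so a later allocation
    // that happens to reuse its address cannot trigger a false positive.
    void ForgetLock(void* cs)
    {
        LockPair from = std::make_pair(cs, (void*)0); // least pair starting with cs
        std::set<LockPair>::iterator it = forward.lower_bound(from);
        while (it != forward.end() && it->first == cs) {
            inverse.erase(std::make_pair(it->second, it->first));
            forward.erase(it++);
        }
        it = inverse.lower_bound(from);
        while (it != inverse.end() && it->first == cs) {
            forward.erase(std::make_pair(it->second, it->first));
            inverse.erase(it++);
        }
    }

Because both containers are ordered by the pair's first element, each cleanup walks only the entries that actually mention cs instead of scanning the whole table.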

src/sync.cpp

@@ -56,11 +56,24 @@ private:
 };

 typedef std::vector<std::pair<void*, CLockLocation> > LockStack;
+typedef std::map<std::pair<void*, void*>, LockStack> LockOrders;
+typedef std::set<std::pair<void*, void*> > InvLockOrders;

-static boost::mutex dd_mutex;
-static std::map<std::pair<void*, void*>, LockStack> lockorders;
-static boost::thread_specific_ptr<LockStack> lockstack;
+struct LockData {
+    // Very ugly hack: as the global constructs and destructors run single
+    // threaded, we use this boolean to know whether LockData still exists,
+    // as DeleteLock can get called by global CCriticalSection destructors
+    // after LockData disappears.
+    bool available;
+    LockData() : available(true) {}
+    ~LockData() { available = false; }
+
+    LockOrders lockorders;
+    InvLockOrders invlockorders;
+    boost::mutex dd_mutex;
+} static lockdata;
+
+boost::thread_specific_ptr<LockStack> lockstack;

 static void potential_deadlock_detected(const std::pair<void*, void*>& mismatch, const LockStack& s1, const LockStack& s2)
 {
@@ -117,7 +130,7 @@ static void push_lock(void* c, const CLockLocation& locklocation, bool fTry)
     if (lockstack.get() == NULL)
         lockstack.reset(new LockStack);

-    dd_mutex.lock();
+    boost::unique_lock<boost::mutex> lock(lockdata.dd_mutex);

     (*lockstack).push_back(std::make_pair(c, locklocation));
@@ -127,23 +140,21 @@ static void push_lock(void* c, const CLockLocation& locklocation, bool fTry)
                 break;

             std::pair<void*, void*> p1 = std::make_pair(i.first, c);
-            if (lockorders.count(p1))
+            if (lockdata.lockorders.count(p1))
                 continue;
-            lockorders[p1] = (*lockstack);
+            lockdata.lockorders[p1] = (*lockstack);

             std::pair<void*, void*> p2 = std::make_pair(c, i.first);
-            if (lockorders.count(p2))
-                potential_deadlock_detected(p1, lockorders[p2], lockorders[p1]);
+            lockdata.invlockorders.insert(p2);
+            if (lockdata.lockorders.count(p2))
+                potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
         }
     }
-    dd_mutex.unlock();
 }

 static void pop_lock()
 {
-    dd_mutex.lock();
     (*lockstack).pop_back();
-    dd_mutex.unlock();
 }

 void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry)
@@ -173,4 +184,26 @@ void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs)
     abort();
 }

+void DeleteLock(void* cs)
+{
+    if (!lockdata.available) {
+        // We're already shutting down.
+        return;
+    }
+    boost::unique_lock<boost::mutex> lock(lockdata.dd_mutex);
+    std::pair<void*, void*> item = std::make_pair(cs, (void*)0);
+    LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
+    while (it != lockdata.lockorders.end() && it->first.first == cs) {
+        std::pair<void*, void*> invitem = std::make_pair(it->first.second, it->first.first);
+        lockdata.invlockorders.erase(invitem);
+        lockdata.lockorders.erase(it++);
+    }
+    InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
+    while (invit != lockdata.invlockorders.end() && invit->first == cs) {
+        std::pair<void*, void*> invinvitem = std::make_pair(invit->second, invit->first);
+        lockdata.lockorders.erase(invinvitem);
+        lockdata.invlockorders.erase(invit++);
+    }
+}
+
 #endif /* DEBUG_LOCKORDER */
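
A note on the DeleteLock loops above: lower_bound(std::make_pair(cs, (void*)0)) relies on std::pair's lexicographic ordering, so every key whose first element is cs sorts at or after (cs, NULL), and advancing the iterator while it->first.first == cs visits exactly the entries that name cs in the first position. A small stand-alone illustration of that lookup pattern (the container and names here are for demonstration only, not code from this commit):

    #include <cassert>
    #include <map>
    #include <utility>

    int main()
    {
        int a, b, c; // stand-ins for three mutex addresses
        std::map<std::pair<void*, void*>, int> orders;
        orders[std::make_pair((void*)&a, (void*)&b)] = 1;
        orders[std::make_pair((void*)&b, (void*)&a)] = 2;
        orders[std::make_pair((void*)&b, (void*)&c)] = 3;

        // lower_bound((&b, NULL)) lands on the first key whose first element
        // is &b, because pairs compare by their first element before their second.
        int found = 0;
        std::map<std::pair<void*, void*>, int>::iterator it =
            orders.lower_bound(std::make_pair((void*)&b, (void*)0));
        while (it != orders.end() && it->first.first == (void*)&b) {
            ++found;
            ++it;
        }
        assert(found == 2); // only the two entries keyed by &b are visited
        return 0;
    }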

src/sync.h

@@ -71,30 +71,39 @@ public:
     }
 };

-/**
- * Wrapped boost mutex: supports recursive locking, but no waiting
- * TODO: We should move away from using the recursive lock by default.
- */
-typedef AnnotatedMixin<boost::recursive_mutex> CCriticalSection;
-
-/** Wrapped boost mutex: supports waiting but not recursive locking */
-typedef AnnotatedMixin<boost::mutex> CWaitableCriticalSection;
-
-/** Just a typedef for boost::condition_variable, can be wrapped later if desired */
-typedef boost::condition_variable CConditionVariable;
-
 #ifdef DEBUG_LOCKORDER
 void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false);
 void LeaveCritical();
 std::string LocksHeld();
 void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
+void DeleteLock(void* cs);
 #else
 void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
 void static inline LeaveCritical() {}
 void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
+void static inline DeleteLock(void* cs) {}
 #endif
 #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)

+/**
+ * Wrapped boost mutex: supports recursive locking, but no waiting
+ * TODO: We should move away from using the recursive lock by default.
+ */
+class CCriticalSection : public AnnotatedMixin<boost::recursive_mutex>
+{
+public:
+    ~CCriticalSection() {
+        DeleteLock((void*)this);
+    }
+};
+
+typedef CCriticalSection CDynamicCriticalSection;
+
+/** Wrapped boost mutex: supports waiting but not recursive locking */
+typedef AnnotatedMixin<boost::mutex> CWaitableCriticalSection;
+
+/** Just a typedef for boost::condition_variable, can be wrapped later if desired */
+typedef boost::condition_variable CConditionVariable;
+
 #ifdef DEBUG_LOCKCONTENTION
 void PrintLockContention(const char* pszName, const char* pszFile, int nLine);
 #endif
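
With the destructor added in sync.h above, every CCriticalSection now removes its own entries from the lockorder tables when it is destroyed, and the CDynamicCriticalSection typedef marks the dynamically allocated case explicitly. A hypothetical usage sketch follows (CExampleWallet and ReloadWallet are invented for illustration and are not part of this change):

    #include "sync.h"

    // Hypothetical owner of a lock whose lifetime is shorter than the process.
    struct CExampleWallet {
        CCriticalSection cs_wallet; // lockorder pairs referencing &cs_wallet
                                    // accumulate while the wallet is in use
    };

    void ReloadWallet(CExampleWallet*& pwallet)
    {
        delete pwallet; // ~CCriticalSection calls DeleteLock(this) under
                        // DEBUG_LOCKORDER, purging the stale pairs
        pwallet = new CExampleWallet(); // a reused address starts with a clean slate
    }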
