Diffstat (limited to 'gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h')
-rw-r--r--  gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h | 281
1 file changed, 120 insertions(+), 161 deletions(-)
diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
index cbd1c25eb69..b1a58e421d8 100644
--- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -20,25 +20,27 @@
namespace __sanitizer {
-class MUTEX StaticSpinMutex {
+class SANITIZER_MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
- void Lock() ACQUIRE() {
+ void Lock() SANITIZER_ACQUIRE() {
if (LIKELY(TryLock()))
return;
LockSlow();
}
- bool TryLock() TRY_ACQUIRE(true) {
+ bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
- void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
+ void Unlock() SANITIZER_RELEASE() {
+ atomic_store(&state_, 0, memory_order_release);
+ }
- void CheckLocked() const CHECK_LOCKED() {
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
@@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex {
void LockSlow();
};
-class MUTEX SpinMutex : public StaticSpinMutex {
+class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
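The test-and-set fast path above maps directly onto standard C++ atomics. A minimal standalone sketch of the same pattern (illustrative only; the real LockSlow() in sanitizer_mutex.cpp additionally backs off and yields between retries):

```cpp
#include <atomic>

class SpinLockSketch {
 public:
  void Lock() {
    // exchange() returns the previous value; 0 means we took the lock.
    while (state_.exchange(1, std::memory_order_acquire) != 0) {
      // Spin on a plain load to avoid hammering the cache line with writes.
      while (state_.load(std::memory_order_relaxed) != 0) {
      }
    }
  }
  void Unlock() { state_.store(0, std::memory_order_release); }

 private:
  std::atomic<unsigned> state_{0};
};
```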
@@ -95,7 +97,11 @@ enum {
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
-#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)
+// Disable checked locks on Darwin. Although Darwin platforms support
+// THREADLOCAL variables they are not usable early on during process init when
+// `__sanitizer::Mutex` is used.
+#define SANITIZER_CHECK_DEADLOCKS \
+ (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)
#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
@@ -111,7 +117,7 @@ struct MutexMeta {
class CheckedMutex {
public:
- constexpr CheckedMutex(MutexType type)
+ explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
: type_(type)
#endif
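When SANITIZER_CHECK_DEADLOCKS is enabled, CheckedMutex verifies lock ordering at runtime. A hypothetical sketch of the general technique, a thread-local stack of held mutex types checked against a can-lock matrix (all names invented here; the real bookkeeping lives in sanitizer_mutex.cpp):

```cpp
#include <cassert>
#include <cstddef>

constexpr int kMaxMutexTypes = 64;

// can_lock_after[a][b] is true if a mutex of type b may be acquired while a
// mutex of type a is held. In a real checker this would be populated once at
// startup from the static mutex-type graph.
bool can_lock_after[kMaxMutexTypes][kMaxMutexTypes];

thread_local int held_types[kMaxMutexTypes];  // assumes nesting < kMaxMutexTypes
thread_local size_t held_count = 0;

void OnLock(int type) {
  for (size_t i = 0; i < held_count; i++)
    assert(can_lock_after[held_types[i]][type] && "lock order violation");
  held_types[held_count++] = type;
}

void OnUnlock(int type) {
  // Remove the most recent occurrence of `type`; unlock order may differ
  // from lock order.
  for (size_t i = held_count; i-- > 0;) {
    if (held_types[i] == type) {
      held_types[i] = held_types[--held_count];
      return;
    }
  }
  assert(false && "unlocking a mutex that is not held");
}
```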
@@ -152,15 +158,15 @@ class CheckedMutex {
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
-class MUTEX Mutex : CheckedMutex {
+class SANITIZER_MUTEX Mutex : CheckedMutex {
public:
- constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {}
+ explicit constexpr Mutex(MutexType type = MutexUnchecked)
+ : CheckedMutex(type) {}
- void Lock() ACQUIRE() {
+ void Lock() SANITIZER_ACQUIRE() {
CheckedMutex::Lock();
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
- const uptr kMaxSpinIters = 1500;
for (uptr spin_iters = 0;; spin_iters++) {
u64 new_state;
bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
@@ -189,8 +195,6 @@ class MUTEX Mutex : CheckedMutex {
// We've incremented waiting writers, so now block.
writers_.Wait();
spin_iters = 0;
- state = atomic_load(&state_, memory_order_relaxed);
- DCHECK_NE(state & kWriterSpinWait, 0);
} else {
// We've set kWriterSpinWait, but we are still in active spinning.
}
@@ -199,10 +203,26 @@ class MUTEX Mutex : CheckedMutex {
// Either way we need to reset kWriterSpinWait
// next time we take the lock or block again.
reset_mask = ~kWriterSpinWait;
+ state = atomic_load(&state_, memory_order_relaxed);
+ DCHECK_NE(state & kWriterSpinWait, 0);
+ }
+ }
+
+ bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
+ u64 state = atomic_load_relaxed(&state_);
+ for (;;) {
+ if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
+ return false;
+ // The mutex is not read-/write-locked, try to lock.
+ if (LIKELY(atomic_compare_exchange_weak(
+ &state_, &state, state | kWriterLock, memory_order_acquire))) {
+ CheckedMutex::Lock();
+ return true;
+ }
}
}
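The new TryLock() leans on the compare_exchange_weak idiom: on failure the expected value is refreshed in place, so the loop re-tests the fresh state without an extra explicit load. A standalone equivalent in standard C++ (assumed bit position, not the real constants):

```cpp
#include <atomic>
#include <cstdint>

constexpr uint64_t kWriterLockBit = 1ull << 60;  // placeholder bit position

bool TryWriteLock(std::atomic<uint64_t> &state_) {
  uint64_t state = state_.load(std::memory_order_relaxed);
  for (;;) {
    if (state & kWriterLockBit)
      return false;  // already write-locked; give up immediately
    // On failure, compare_exchange_weak writes the current value back into
    // `state`, so the next iteration examines up-to-date state.
    if (state_.compare_exchange_weak(state, state | kWriterLockBit,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed))
      return true;
  }
}
```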
- void Unlock() RELEASE() {
+ void Unlock() SANITIZER_RELEASE() {
CheckedMutex::Unlock();
bool wake_writer;
u64 wake_readers;
@@ -212,17 +232,16 @@ class MUTEX Mutex : CheckedMutex {
DCHECK_NE(state & kWriterLock, 0);
DCHECK_EQ(state & kReaderLockMask, 0);
new_state = state & ~kWriterLock;
- wake_writer =
- (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
+ wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
+ (state & kWaitingWriterMask) != 0;
if (wake_writer)
new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
wake_readers =
- (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
+ wake_writer || (state & kWriterSpinWait) != 0
? 0
: ((state & kWaitingReaderMask) >> kWaitingReaderShift);
if (wake_readers)
- new_state = (new_state & ~kWaitingReaderMask) +
- (wake_readers << kReaderLockShift);
+ new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
} while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
memory_order_release)));
if (UNLIKELY(wake_writer))
@@ -231,37 +250,54 @@ class MUTEX Mutex : CheckedMutex {
readers_.Post(wake_readers);
}
- void ReadLock() ACQUIRE_SHARED() {
+ void ReadLock() SANITIZER_ACQUIRE_SHARED() {
CheckedMutex::Lock();
- bool locked;
- u64 new_state;
+ u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
- do {
- locked =
- (state & kReaderLockMask) == 0 &&
- (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
+ for (uptr spin_iters = 0;; spin_iters++) {
+ bool locked = (state & kWriterLock) != 0;
+ u64 new_state;
+ if (LIKELY(!locked)) {
+ new_state = (state + kReaderLockInc) & reset_mask;
+ } else if (spin_iters > kMaxSpinIters) {
+ new_state = (state + kWaitingReaderInc) & reset_mask;
+ } else if ((state & kReaderSpinWait) == 0) {
+ // Active spinning, but denote our presence so that unlocking
+ // thread does not wake up other threads.
+ new_state = state | kReaderSpinWait;
+ } else {
+ // Active spinning.
+ state = atomic_load(&state_, memory_order_relaxed);
+ continue;
+ }
+ if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_acquire)))
+ continue;
if (LIKELY(!locked))
- new_state = state + kReaderLockInc;
- else
- new_state = state + kWaitingReaderInc;
- } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
- memory_order_acquire)));
- if (UNLIKELY(locked))
- readers_.Wait();
- DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
- DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
+ return; // We've locked the mutex.
+ if (spin_iters > kMaxSpinIters) {
+ // We've incremented waiting readers, so now block.
+ readers_.Wait();
+ spin_iters = 0;
+ } else {
+ // We've set kReaderSpinWait, but we are still in active spinning.
+ }
+ reset_mask = ~kReaderSpinWait;
+ state = atomic_load(&state_, memory_order_relaxed);
+ }
}
- void ReadUnlock() RELEASE_SHARED() {
+ void ReadUnlock() SANITIZER_RELEASE_SHARED() {
CheckedMutex::Unlock();
bool wake;
u64 new_state;
u64 state = atomic_load_relaxed(&state_);
do {
DCHECK_NE(state & kReaderLockMask, 0);
- DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
+ DCHECK_EQ(state & kWriterLock, 0);
new_state = state - kReaderLockInc;
- wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
+ wake = (new_state &
+ (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
(new_state & kWaitingWriterMask) != 0;
if (wake)
new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
@@ -277,13 +313,13 @@ class MUTEX Mutex : CheckedMutex {
// owns the mutex but a child checks that it is locked. Rather than
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned.
- void CheckWriteLocked() const CHECK_LOCKED() {
+ void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
}
- void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }
- void CheckReadLocked() const CHECK_LOCKED() {
+ void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
}
@@ -305,16 +341,14 @@ class MUTEX Mutex : CheckedMutex {
// - a writer is awake and spin-waiting
// the flag is used to prevent thundering herd problem
// (new writers are not woken if this flag is set)
+ // - a reader is awake and spin-waiting
//
- // Writer support active spinning, readers does not.
+ // Both writers and readers use active spinning before blocking.
// But readers are more aggressive and always take the mutex
// if there are any other readers.
- // Writers hand off the mutex to readers: after wake up readers
- // already assume ownership of the mutex (don't need to do any
- // state updates). But the mutex is not handed off to writers,
- // after wake up writers compete to lock the mutex again.
- // This is needed to allow repeated write locks even in presence
- // of other blocked writers.
+ // After wake up both writers and readers compete to lock the
+ // mutex again. This is needed to allow repeated locks even in presence
+ // of other blocked threads.
static constexpr u64 kCounterWidth = 20;
static constexpr u64 kReaderLockShift = 0;
static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
@@ -330,7 +364,11 @@ class MUTEX Mutex : CheckedMutex {
<< kWaitingWriterShift;
static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
+ static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);
+
+ static constexpr uptr kMaxSpinIters = 1500;
+ Mutex(LinkerInitialized) = delete;
Mutex(const Mutex &) = delete;
void operator=(const Mutex &) = delete;
};
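For reference, the 64-bit state packs three 20-bit counters (active readers, waiting readers, waiting writers) plus three flag bits. A standalone mirror of the layout with static_asserts, using the constant names from the diff (illustrative):

```cpp
#include <cstdint>

using u64 = uint64_t;
constexpr u64 kCounterWidth = 20;
constexpr u64 kCounterMask = (1ull << kCounterWidth) - 1;
constexpr u64 kReaderLockMask = kCounterMask;                            // bits 0..19
constexpr u64 kWaitingReaderMask = kCounterMask << kCounterWidth;        // bits 20..39
constexpr u64 kWaitingWriterMask = kCounterMask << (2 * kCounterWidth);  // bits 40..59
constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);                 // bit 60
constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);         // bit 61
constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);         // bit 62

// The three counters and three flags occupy disjoint bits and fit in a u64.
static_assert((kReaderLockMask & kWaitingReaderMask) == 0, "disjoint");
static_assert((kWaitingReaderMask & kWaitingWriterMask) == 0, "disjoint");
static_assert((kWaitingWriterMask &
               (kWriterLock | kWriterSpinWait | kReaderSpinWait)) == 0,
              "disjoint");
static_assert(3 * kCounterWidth + 2 < 64, "layout fits in u64");
```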
@@ -338,149 +376,70 @@ class MUTEX Mutex : CheckedMutex {
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
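FutexWait/FutexWake are declared here and defined per platform. On Linux they typically reduce to the futex(2) syscall; a minimal sketch under that assumption (not the actual sanitizer definitions, which handle other OSes and error paths):

```cpp
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>
#include <cstdint>

void FutexWaitSketch(std::atomic<uint32_t> *p, uint32_t cmp) {
  // Blocks only while *p still equals cmp, which closes the lost-wakeup race
  // between the caller's last check and going to sleep.
  syscall(SYS_futex, reinterpret_cast<uint32_t *>(p), FUTEX_WAIT_PRIVATE, cmp,
          nullptr, nullptr, 0);
}

void FutexWakeSketch(std::atomic<uint32_t> *p, uint32_t count) {
  // Wakes up to `count` threads blocked on the same address.
  syscall(SYS_futex, reinterpret_cast<uint32_t *>(p), FUTEX_WAKE_PRIVATE,
          count, nullptr, nullptr, 0);
}
```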
-class MUTEX BlockingMutex {
- public:
- explicit constexpr BlockingMutex(LinkerInitialized)
- : opaque_storage_ {0, }, owner_ {0} {}
- BlockingMutex();
- void Lock() ACQUIRE();
- void Unlock() RELEASE();
-
- // This function does not guarantee an explicit check that the calling thread
- // is the thread which owns the mutex. This behavior, while more strictly
- // correct, causes problems in cases like StopTheWorld, where a parent thread
- // owns the mutex but a child checks that it is locked. Rather than
- // maintaining complex state to work around those situations, the check only
- // checks that the mutex is owned, and assumes callers to be generally
- // well-behaved.
- void CheckLocked() const CHECK_LOCKED();
-
- private:
- // Solaris mutex_t has a member that requires 64-bit alignment.
- ALIGNED(8) uptr opaque_storage_[10];
- uptr owner_; // for debugging
-};
-
-// Reader-writer spin mutex.
-class MUTEX RWMutex {
+template <typename MutexType>
+class SANITIZER_SCOPED_LOCK GenericScopedLock {
public:
- RWMutex() {
- atomic_store(&state_, kUnlocked, memory_order_relaxed);
- }
-
- ~RWMutex() {
- CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
- }
-
- void Lock() ACQUIRE() {
- u32 cmp = kUnlocked;
- if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
- memory_order_acquire))
- return;
- LockSlow();
- }
-
- void Unlock() RELEASE() {
- u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
- DCHECK_NE(prev & kWriteLock, 0);
- (void)prev;
- }
-
- void ReadLock() ACQUIRE_SHARED() {
- u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
- if ((prev & kWriteLock) == 0)
- return;
- ReadLockSlow();
- }
-
- void ReadUnlock() RELEASE_SHARED() {
- u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
- DCHECK_EQ(prev & kWriteLock, 0);
- DCHECK_GT(prev & ~kWriteLock, 0);
- (void)prev;
+ explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
+ mu_->Lock();
}
- void CheckLocked() const CHECK_LOCKED() {
- CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
- }
+ ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
private:
- atomic_uint32_t state_;
-
- enum {
- kUnlocked = 0,
- kWriteLock = 1,
- kReadLock = 2
- };
-
- void NOINLINE LockSlow() {
- for (int i = 0;; i++) {
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- u32 cmp = atomic_load(&state_, memory_order_relaxed);
- if (cmp == kUnlocked &&
- atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
- memory_order_acquire))
- return;
- }
- }
-
- void NOINLINE ReadLockSlow() {
- for (int i = 0;; i++) {
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- u32 prev = atomic_load(&state_, memory_order_acquire);
- if ((prev & kWriteLock) == 0)
- return;
- }
- }
+ MutexType *mu_;
- RWMutex(const RWMutex &) = delete;
- void operator=(const RWMutex &) = delete;
+ GenericScopedLock(const GenericScopedLock &) = delete;
+ void operator=(const GenericScopedLock &) = delete;
};
template <typename MutexType>
-class SCOPED_LOCK GenericScopedLock {
+class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
public:
- explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
- mu_->Lock();
+ explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
+ : mu_(mu) {
+ mu_->ReadLock();
}
- ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
+ ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;
- GenericScopedLock(const GenericScopedLock &) = delete;
- void operator=(const GenericScopedLock &) = delete;
+ GenericScopedReadLock(const GenericScopedReadLock &) = delete;
+ void operator=(const GenericScopedReadLock &) = delete;
};
template <typename MutexType>
-class SCOPED_LOCK GenericScopedReadLock {
+class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
public:
- explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
- mu_->ReadLock();
+ ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
+ SANITIZER_ACQUIRE(mu)
+ : mu_(mu), write_(write) {
+ if (write_)
+ mu_->Lock();
+ else
+ mu_->ReadLock();
}
- ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
+ ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
+ if (write_)
+ mu_->Unlock();
+ else
+ mu_->ReadUnlock();
+ }
private:
MutexType *mu_;
+ bool write_;
- GenericScopedReadLock(const GenericScopedReadLock &) = delete;
- void operator=(const GenericScopedReadLock &) = delete;
+ GenericScopedRWLock(const GenericScopedRWLock &) = delete;
+ void operator=(const GenericScopedRWLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-typedef GenericScopedLock<RWMutex> RWMutexLock;
-typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
+typedef GenericScopedRWLock<Mutex> RWLock;
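A hypothetical usage of the resulting typedefs, showing the RAII discipline the scoped classes enforce:

```cpp
#include "sanitizer_common/sanitizer_mutex.h"  // path assumed

__sanitizer::Mutex mu;

void Writer() {
  __sanitizer::Lock l(&mu);  // exclusive; Unlock() runs at scope exit
  // ... mutate shared state ...
}

void Reader() {
  __sanitizer::ReadLock l(&mu);  // shared; concurrent readers allowed
  // ... read shared state ...
}

void Either(bool write) {
  // GenericScopedRWLock picks Lock() or ReadLock() at runtime.
  __sanitizer::RWLock l(&mu, write);
}
```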
} // namespace __sanitizer