in absl/synchronization/mutex.cc [1914:2028]
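// Overview: LockSlowLoop() is the contended path behind Mutex::Lock() /
// ReaderLock() and the Condition-based LockWhen()/Await() family; it is
// reached from LockSlow()/LockSlowWithDeadline() once the single
// compare-and-swap fast path fails. Each iteration reloads the mutex word v
// and either acquires directly, becomes the first waiter, joins an existing
// read-lock via the reader count stored in a waiter record, or appends
// itself to the waiter list, blocking between attempts.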
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  int c = 0;
  intptr_t v = mu_.load(std::memory_order_relaxed);
  if ((v & kMuEvent) != 0) {
    PostSynchEvent(this, waitp->how == kExclusive ? SYNCH_EV_LOCK
                                                  : SYNCH_EV_READERLOCK);
  }
  ABSL_RAW_CHECK(
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");
  for (;;) {
    v = mu_.load(std::memory_order_relaxed);
    CheckForMutexCorruption(v, "Lock");
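    // Case 1: none of the bits in slow_need_zero are set, so this mode can
    // take the word with a single CAS (fast_or sets the lock bits; fast_add
    // bumps the reader count for shared mode). The lock is kept only if
    // there is no Condition or it evaluates true; note that Block() clears
    // waitp->cond after a timeout, so the break below also returns for a
    // timed-out wait (hence the "we timed out" in its comment).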
    if ((v & waitp->how->slow_need_zero) == 0) {
      if (mu_.compare_exchange_strong(
              v, (waitp->how->fast_or |
                  (v & zap_desig_waker[flags & kMuHasBlocked])) +
                     waitp->how->fast_add,
              std::memory_order_acquire, std::memory_order_relaxed)) {
        if (waitp->cond == nullptr ||
            EvalConditionAnnotated(waitp->cond, this, true, false,
                                   waitp->how == kShared)) {
          break;  // we timed out, or condition true, so return
        }
        this->UnlockSlow(waitp);  // got lock but condition false
        this->Block(waitp->thread);
        flags |= kMuHasBlocked;
        c = 0;
      }
    } else {  // need to access waiter list
      bool dowait = false;
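      // The word could not be grabbed outright, so one of three things
      // happens here:
      //  (a) no waiter list and nobody spinning: try to become the one and
      //      only waiter;
      //  (b) a reader joining other readers: spin-lock the word, bump the
      //      reader count stored in the waiter record, and take the lock in
      //      shared mode;
      //  (c) otherwise: spin-lock the word and enqueue behind the existing
      //      waiters.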
      if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters
        // This thread tries to become the one and only waiter.
        PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
        intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
                      kMuWait;
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
          nv |= kMuWrWait;
        }
        if (mu_.compare_exchange_strong(
                v, reinterpret_cast<intptr_t>(new_h) | nv,
                std::memory_order_release, std::memory_order_relaxed)) {
          dowait = true;
        } else {  // attempted Enqueue() failed
          // zero out the waitp field set by Enqueue()
          waitp->thread->waitp = nullptr;
        }
      } else if ((v & waitp->how->slow_inc_need_zero &
                  ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
        // This is a reader that needs to increment the reader count, but
        // the count is currently held in the last waiter, so the word must
        // be spin-locked (kMuSpin) before h->readers can be updated safely.
        if (mu_.compare_exchange_strong(
                v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
                       kMuReader,
                std::memory_order_acquire, std::memory_order_relaxed)) {
          PerThreadSynch *h = GetPerThreadSynch(v);
          h->readers += kMuOne;  // inc reader count in waiter
          do {                   // release spinlock
            v = mu_.load(std::memory_order_relaxed);
          } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
          if (waitp->cond == nullptr ||
              EvalConditionAnnotated(waitp->cond, this, true, false,
                                     waitp->how == kShared)) {
            break;  // we timed out, or condition true, so return
          }
          this->UnlockSlow(waitp);  // got lock but condition false
          this->Block(waitp->thread);
          flags |= kMuHasBlocked;
          c = 0;
        }
      } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
                 mu_.compare_exchange_strong(
                     v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
                            kMuWait,
                     std::memory_order_acquire, std::memory_order_relaxed)) {
        PerThreadSynch *h = GetPerThreadSynch(v);
        PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
        intptr_t wr_wait = 0;
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
          wr_wait = kMuWrWait;  // give priority to a waiting writer
        }
        do {  // release spinlock
          v = mu_.load(std::memory_order_relaxed);
        } while (!mu_.compare_exchange_weak(
            v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
                   reinterpret_cast<intptr_t>(new_h),
            std::memory_order_release, std::memory_order_relaxed));
        dowait = true;
      }
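      // If we enqueued ourselves above, park this thread until a releasing
      // thread dequeues and wakes it, or until the wait times out; either
      // way the loop then retries from the top with kMuHasBlocked set.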
      if (dowait) {
        this->Block(waitp->thread);  // wait until removed from list or timeout
        flags |= kMuHasBlocked;
        c = 0;
      }
    }
    ABSL_RAW_CHECK(
        waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
        "detected illegal recursion into Mutex code");
    // delay, then try again
    c = synchronization_internal::MutexDelay(c, GENTLE);
  }
  ABSL_RAW_CHECK(
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");
  if ((v & kMuEvent) != 0) {
    PostSynchEvent(this, waitp->how == kExclusive
                             ? SYNCH_EV_LOCK_RETURNING
                             : SYNCH_EV_READERLOCK_RETURNING);
  }
}
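A minimal caller sketch (not part of mutex.cc; it uses the public
absl::Mutex API, and the thread/variable names are illustrative): a
LockWhen() call on a contended Mutex with an initially-false Condition is
exactly the kind of call that funnels into LockSlowLoop() above via
LockSlow(), since a Condition always forces the slow path.

#include <thread>

#include "absl/synchronization/mutex.h"

int main() {
  absl::Mutex mu;
  bool ready = false;
  std::thread waiter([&] {
    // Fails the fast path while mu is held / `ready` is false, so this
    // thread enqueues itself in LockSlowLoop() and blocks.
    mu.LockWhen(absl::Condition(&ready));
    mu.Unlock();
  });
  mu.Lock();
  ready = true;  // waiters re-evaluate the Condition under mu
  mu.Unlock();   // the unlock path wakes `waiter`; its condition now holds
  waiter.join();
  return 0;
}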