in cachelib/allocator/MMLru-inl.h [44:94]
// Records a read/write access to `node`, optionally promoting it to the head
// of the LRU. Returns true only when the promotion critical section actually
// ran (or was queued via lock_combine); returns false when the access kind is
// not configured to update the LRU, when the node no longer needs (or is not
// eligible for) a promotion, or when the try-lock optimization failed to
// acquire the mutex.
bool MMLru::Container<T, HookPtr>::recordAccess(T& node,
AccessMode mode) noexcept {
// Honor per-container config: writes and reads can independently be
// configured to not touch LRU ordering.
if ((mode == AccessMode::kWrite && !config_.updateOnWrite) ||
(mode == AccessMode::kRead && !config_.updateOnRead)) {
return false;
}
const auto curr = static_cast<Time>(util::getCurrentTimeSec());
// check if the node is still being memory managed
// Promote only when the node is still in this container AND either
//  (a) its last update is older than the refresh interval (throttles how
//      often a hot node pays the promotion cost), or
//  (b) it has never been marked accessed since insertion.
// lruRefreshTime_ is read with relaxed order: a slightly stale interval
// only affects promotion frequency, not correctness.
if (node.isInMMContainer() &&
((curr >= getUpdateTime(node) +
lruRefreshTime_.load(std::memory_order_relaxed)) ||
!isAccessed(node))) {
// Set the accessed bit outside the lock; it is per-node state.
if (!isAccessed(node)) {
markAccessed(node);
}
// Critical section: everything below must run while holding lruMutex_.
auto func = [this, &node, curr]() {
reconfigureLocked(curr);
// Don't leave the insertion point pointing at the node we move.
ensureNotInsertionPoint(node);
// Re-check under the lock: the node may have been removed from the
// container between the unlocked check above and acquiring the lock.
if (node.isInMMContainer()) {
lru_.moveToHead(node);
setUpdateTime(node, curr);
}
// If the node sat in the tracked tail region, it is leaving it now:
// clear the marker, shrink the tail count, and recompute where new
// insertions should land.
if (isTail(node)) {
unmarkTail(node);
tailSize_--;
// NOTE(review): XDCHECK_LE(0u, tailSize_) is vacuously true if
// tailSize_ is unsigned; an underflow guard would need to check
// tailSize_ > 0 *before* the decrement — confirm intent upstream.
XDCHECK_LE(0u, tailSize_);
updateLruInsertionPoint();
}
};
// if the tryLockUpdate optimization is on, and we were able to grab the
// lock, execute the critical section and return true, else return false
//
// if the tryLockUpdate optimization is off, we always execute the
// critical section and return true
if (config_.tryLockUpdate) {
// Opportunistic path: never block on a contended mutex for a mere
// LRU refresh — skip the promotion instead.
if (auto lck = LockHolder{*lruMutex_, std::try_to_lock}) {
func();
return true;
}
return false;
}
// Blocking path: lock_combine runs `func` under the mutex (possibly
// executed by another thread holding the lock, via combining).
lruMutex_->lock_combine(func);
return true;
}
return false;
}