in cachelib/allocator/MM2Q-inl.h [44:110]
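// Records an access to `node` and refreshes its position in the 2Q LRU
// (Hot/Warm/Cold lists). Returns true if the node's update time and list
// position were refreshed; returns false if this access mode is configured
// not to update recency, the node is no longer in the container, the
// refresh interval has not yet elapsed, or the try-lock optimization could
// not acquire the mutex.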
template <typename T, MM2Q::Hook<T>::template ClassType<T> HookPtr>
bool MM2Q::Container<T, HookPtr>::recordAccess(T& node,
                                               AccessMode mode) noexcept {
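  // Respect the per-access-mode knobs: reads and writes can independently
  // be configured not to refresh a node's recency.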
  if ((mode == AccessMode::kWrite && !config_.updateOnWrite) ||
      (mode == AccessMode::kRead && !config_.updateOnRead)) {
    return false;
  }
  const auto curr = static_cast<Time>(util::getCurrentTimeSec());
  // Check that the node is still being memory managed and that at least
  // lruRefreshTime_ has elapsed since its last update; throttling on the
  // refresh time bounds how often a hot item pays for a list update.
  if (node.isInMMContainer() &&
      ((curr >= getUpdateTime(node) +
                    lruRefreshTime_.load(std::memory_order_relaxed)))) {
    auto func = [&]() {
      reconfigureLocked(curr);
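      // Re-check under the lock: the node may have been removed from the
      // container between the unlocked check above and acquiring lruMutex_.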
      if (!node.isInMMContainer()) {
        return false;
      }
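      // Hot nodes stay in the Hot list; a re-access just moves them to its
      // head.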
      if (isHot(node)) {
        lru_.getList(LruType::Hot).moveToHead(node);
        ++numHotAccesses_;
      } else if (isCold(node)) {
        if (inTail(node)) {
          unmarkTail(node);
          lru_.getList(LruType::ColdTail).remove(node);
          ++numColdTailAccesses_;
        } else {
          lru_.getList(LruType::Cold).remove(node);
        }
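        // A re-accessed cold node is promoted to Warm: this is the core 2Q
        // signal that an item is referenced more than once.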
        lru_.getList(LruType::Warm).linkAtHead(node);
        unmarkCold(node);
        ++numColdAccesses_;
        // Only rebalance if the config says so; recordAccess is called
        // mostly on latency-sensitive cache get operations.
        if (config_.rebalanceOnRecordAccess) {
          rebalance();
        }
      } else {
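        // Warm (or WarmTail) node: refresh its recency within the warm
        // queue, pulling tail nodes back into the main Warm list.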
        if (inTail(node)) {
          unmarkTail(node);
          lru_.getList(LruType::WarmTail).remove(node);
          lru_.getList(LruType::Warm).linkAtHead(node);
          ++numWarmTailAccesses_;
        } else {
          lru_.getList(LruType::Warm).moveToHead(node);
        }
        ++numWarmAccesses_;
      }
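      // Stamp the access time so further hits within lruRefreshTime_
      // short-circuit before ever taking the lock.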
      setUpdateTime(node, curr);
      return true;
    };
    // If the tryLockUpdate optimization is on and we were able to grab the
    // lock, execute the critical section and return its result; otherwise
    // return false without blocking.
    //
    // If the tryLockUpdate optimization is off, we always execute the
    // critical section under the lock and return its result.
    if (config_.tryLockUpdate) {
      if (auto lck = LockHolder{*lruMutex_, std::try_to_lock}) {
        return func();
      }
      return false;
    }
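    // lock_combine executes func while holding lruMutex_ and returns func's
    // result; with a combining mutex (folly::DistributedMutex in cachelib),
    // short critical sections from concurrent callers can be batched and run
    // by a single lock holder, which cuts cache-line bouncing on this hot
    // path.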
    return lruMutex_->lock_combine(func);
  }
  return false;
}
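// A minimal sketch of the config knobs recordAccess consults. The field
// names mirror the config_ members used above (updateOnRead, updateOnWrite,
// tryLockUpdate, rebalanceOnRecordAccess); treat the setup itself as
// illustrative, not as MM2Q's exact Config construction API.
//
//   MM2Q::Config config;
//   config.updateOnRead = true;             // reads refresh recency
//   config.updateOnWrite = false;           // writes do not
//   config.tryLockUpdate = true;            // never block a get on lruMutex_
//   config.rebalanceOnRecordAccess = false; // keep the get path cheap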