in glean/rts/cache.cpp [235:310]
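// Insert a single fact into the cache. Called with the index write lock
// held (see the NOTE below). If a cached fact with the same id already
// holds at least as much data, this is a no-op; otherwise any stale entry
// is unlinked and handed to `dead`, space is reclaimed by eviction if
// necessary, and the new fact is stored and indexed.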
void LookupCache::insertOne(
    Index& index,
    Storage& storage,
    Fact::unique_ptr owned,
    Fact::intrusive_list& dead) {
  const auto size = owned->size();
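  // a fact larger than the entire cache capacity can never be stored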
  if (size > options.capacity) {
    return;
  }
  // check if we already have a fact with this id in the cache
  const Fact *existing = nullptr;
  {
    auto o = index.ids.find(owned->id());
    if (o != index.ids.end()) {
      existing = *o;
    }
  }
  if (existing && existing->tag() >= owned->tag()) {
    // the cached fact has the same or more data than we are trying to insert
    return;
  }
  if (existing || storage.factBytes() + size > options.capacity) {
    // Do deferred LRU operations if we're going to evict or replace. We need
    // to do this even when replacing because the hit buffers might reference
    // the fact we're going to delete.
    //
    // NOTE: drain is "lossy" but in this case, we're running under a
    // write lock for the index so there will be no concurrent drainers
    // and we'll have had a memory barrier before - so drain isn't
    // actually lossy here.
    //
    // This doesn't preserve LRU order as we process the shards one by one,
    // i.e., hits within one shard are processed in chronological order but
    // we lose ordering across shards. It is possible to fix this at the cost
    // of some performance, but it's probably not worth doing.
    for (auto& t : touched) {
      drain(storage, t);
    }
    if (existing) {
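      // the tag of the incoming fact determines which delete counter is
      // charged for replacing the stale entry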
      if (owned->tag() == KEY) {
        ++stats->values[Stats::idByKey_deletes];
      } else {
        ++stats->values[Stats::factById_deletes];
      }
      deleteFromIndex(index, existing);
      // TODO: defer this, see comments in evict
      auto fact = storage.release(existing);
      dead.push_back(*fact);
      // dead has assumed ownership of the fact
      (void)fact.release();
    }
    if (storage.factBytes() + size > options.capacity) {
      // shrink cache to ~90% of capacity
      const auto wanted =
          options.capacity * 0.9 > size ? options.capacity * 0.9 - size : 0;
      evict(index, storage, wanted, dead);
    }
  }
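  // at this point any stale entry is gone and the cache has been shrunk if
  // needed, so the new fact can be appended to storage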
  const auto fact = storage.push_back(std::move(owned));
  // For 'insert' (but not 'BulkStorage') we could unlock the storage here. It
  // doesn't matter, though, since nothing will really use it without getting a
  // lock for the index first.
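  // make the fact visible: always in the id index, and in the key index
  // unless only its type is cached (a TYPE entry presumably has no key to
  // look up)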
  index.ids.insert(fact);
  if (fact->tag() != TYPE) {
    index.keys.insert(fact);
  }
}