in cachelib/compact_cache/CCache-inl.h [169:262]
void CompactCache<C, A, B>::tableRehash(size_t oldNumChunks,
                                        size_t newNumChunks,
                                        RehashOperation op) {
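  // Sanity checks: the caller must already have grown the allocator to at
  // least newNumChunks chunks, and neither chunk count may be zero.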
  XDCHECK_LE(newNumChunks, allocator_.getNumChunks());
  XDCHECK_GT(newNumChunks, 0u);
  XDCHECK_GT(oldNumChunks, 0u);
  /* Loop through all entries in all buckets of the hash table. */
  for (size_t n = 0; n < oldNumChunks; n++) {
    Bucket* table_chunk = reinterpret_cast<Bucket*>(allocator_.getChunk(n));
    for (size_t i = 0; i < bucketsPerChunk_; i++) {
      Bucket* bucket = &table_chunk[i];
      auto lock = locks_.lockExclusive(bucket);
      const size_t capacity = BucketDescriptor::nEntriesCapacity(*bucket);
      /* When expanding the cache (newNumChunks > oldNumChunks), move the
       * bucket's entire capacity of entries over.
       * When shrinking the cache to some fraction of its former size, move
       * that same fraction of each to-be-deleted chunk's entries, so that
       * the cache keeps a non-zero lifetime throughout the rehash. */
      const size_t max_move = std::min(
          std::max((newNumChunks * capacity) / oldNumChunks, (size_t)1),
          capacity);
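      // E.g. shrinking from 4 chunks to 1 caps the moves at roughly
      // capacity / 4 per bucket; any expansion allows the full capacity.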
      size_t moved = 0;
      EntryHandle entry = BucketDescriptor::first(&table_chunk[i]);
      while (entry) {
        const bool valid_entry = !validCb_ || validCb_(entry.key());
        auto remove_context =
            valid_entry ? RemoveContext::kEviction : RemoveContext::kNormal;
        Bucket* new_chunk = tableFindChunk(newNumChunks, entry.key());
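        // The key hashes to the same chunk under the new chunk count, so
        // the entry can stay exactly where it is.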
        if (new_chunk == table_chunk) {
          entry.next();
          continue;
        }
        /* Moving takes two forms. If newNumChunks is bigger, we move
         * everything to its new place in the newly allocated chunks.
         * If it is smaller, we need to preserve read-after-write, so we
         * move a small initial fraction of the soon-to-be-lost buckets to
         * their new home, putting them at the head of the LRU.
         *
         * This happens in two stages. First, we insert the to-be-moved
         * entries into their new home and call the removal callbacks for
         * invalid or excess ones.
         *
         * Second, after reads have moved over, we delete the old entries.
         * This is done in a new cohort to ensure reads never see a
         * temporarily missing entry in the cache.
         */
        if (op == RehashOperation::COPY) {
          if (valid_entry && moved < max_move) {
            /* Add the new entry. The bucket offset within the chunk is the
             * same, so there is no need to re-compute that hash. */
            Bucket* newBucket = &new_chunk[i];
            // Lock ordering is established by the hash function:
            // old hash -> new hash.
            // However, there can be arbitrary hash collisions, so don't
            // re-acquire the same lock (we don't use recursive locks in
            // general).
            bool sameLock = locks_.isSameLock(newBucket, bucket);
            auto higher_lock =
                sameLock ? CCWriteHolder() : locks_.lockExclusive(newBucket);
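            // Copy the entry into its new bucket with the bucketSet variant
            // that matches this cache's value configuration.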
            if (kHasValues) {
              if (kValuesFixedSize) {
                bucketSet(newBucket, entry.key(), entry.val());
              } else {
                bucketSet(newBucket, entry.key(), entry.val(), entry.size());
              }
            } else {
              bucketSet(newBucket, entry.key());
            }
            moved++;
          } else {
            // Not moving this entry: it is either invalid or we have already
            // moved max_move entries. Invoke the remove callback with an
            // eviction context (or a normal-removal context if invalid).
            if (removeCb_) {
              detail::callRemoveCb<SelfType>(
                  removeCb_, entry.key(), entry.val(), remove_context);
            }
          }
          entry.next();
        } else {
          // On the second pass we perform the deletes, so that read requests
          // never observe missing data in the interim.
          BucketDescriptor::del(entry);
          // No entry.next() call here: del() advances the handle.
        }
      }
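      // When growing the table, the walk above must have exhausted the
      // bucket, i.e. entry must be null here.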
      XDCHECK(newNumChunks <= oldNumChunks || !entry);
    }
  }
}