in cachelib/allocator/memory/AllocationClass.h [158:207]
bool forEachAllocation(Slab* slab, AllocTraversalFn&& callback) {
  // Try-lock this allocation class so that no new slab release can begin
  // while we traverse.
  std::unique_lock<std::mutex> startSlabReleaseLockHolder(
      startSlabReleaseLock_, std::defer_lock);

  // If the try_lock fails (a slab release is starting), skip this slab.
  if (!startSlabReleaseLockHolder.try_lock()) {
    return true;
  }
  // Check, under the class lock, that the slab header is valid: the slab must
  // belong to this pool and class and must not be advised away or marked for
  // release.
  using Return = folly::Optional<AllocInfo>;
  auto allocInfo = lock_->lock_combine([this, slab]() -> Return {
    auto slabHdr = slabAlloc_.getSlabHeader(slab);
    if (!slabHdr || slabHdr->classId != classId_ ||
        slabHdr->poolId != poolId_ || slabHdr->isAdvised() ||
        slabHdr->isMarkedForRelease()) {
      return folly::none;
    }
    return Return{{slabHdr->poolId, slabHdr->classId, slabHdr->allocSize}};
  });
  if (!allocInfo) {
    return true;
  }
  // Prefetch the first kForEachAllocPrefetchOffset items in the slab.
  // Note that the prefetch is for read with no temporal locality.
  void* prefetchOffsetPtr = reinterpret_cast<void*>(slab);
  for (unsigned int i = 0; i < kForEachAllocPrefetchOffset; i++) {
    prefetchOffsetPtr = reinterpret_cast<void*>(
        reinterpret_cast<uintptr_t>(prefetchOffsetPtr) + allocationSize_);
    __builtin_prefetch(prefetchOffsetPtr, 0, 0);
  }
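
  // The warm-up loop above leaves prefetchOffsetPtr kForEachAllocPrefetchOffset
  // items past the start of the slab; the visit loop below advances it in lock
  // step with ptr, so by the time the callback reaches an item its cache line
  // has already been requested.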
  void* ptr = reinterpret_cast<void*>(slab);
  unsigned int allocsPerSlab = getAllocsPerSlab();
  for (unsigned int i = 0; i < allocsPerSlab; ++i) {
    prefetchOffsetPtr = reinterpret_cast<void*>(
        reinterpret_cast<uintptr_t>(prefetchOffsetPtr) + allocationSize_);
    // Prefetch the item kForEachAllocPrefetchOffset positions ahead of the
    // one about to be visited.
    __builtin_prefetch(prefetchOffsetPtr, 0, 0);
    if (!callback(ptr, allocInfo.value())) {
      return false;
    }
    ptr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) +
                                  allocationSize_);
  }
  return true;
}
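
For context, a minimal sketch of how a caller might drive this traversal. The helper name countSlots, the variables ac and slab, and the kLimit cap are assumptions for illustration and are not part of AllocationClass; the callback is only assumed to be a lambda compatible with AllocTraversalFn, taking the slot pointer and the AllocInfo and returning bool (false stops the traversal, as the loop above shows).

// Hypothetical caller, not from the cachelib sources; assumes the
// AllocationClass header is included and the caller already holds a valid
// Slab* for this allocation class.
size_t countSlots(AllocationClass& ac, Slab* slab) {
  constexpr size_t kLimit = 1000;  // assumed early-exit cap, illustration only
  size_t count = 0;
  ac.forEachAllocation(slab, [&](void* ptr, const AllocInfo& info) {
    // Each ptr is one allocation slot of size info.allocSize; whether it
    // holds a live item is up to the caller to determine. Here we just count
    // every slot visited.
    (void)ptr;
    (void)info;
    ++count;
    // Returning false makes forEachAllocation stop and return false.
    return count < kLimit;
  });
  return count;
}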