in cachelib/allocator/memory/AllocationClass.cpp [213:319]
// Starts the release of one slab owned by this AllocationClass.
//
// @param mode          advise vs. resize release mode, recorded in the context.
// @param hint          optional pointer into the slab the caller wants
//                      released; nullptr lets us pick a free/allocated slab.
// @param shouldAbortFn callback polled while pruning free allocations; a true
//                      return aborts the release.
//
// @return a SlabReleaseContext. If the slab had no active allocations it is
//         already fully released on return; otherwise the context carries the
//         active allocations the caller must free before completing release.
//
// @throw std::invalid_argument if the hint/slab/header is invalid,
//        std::runtime_error on inconsistent internal state,
//        exception::SlabReleaseAborted if shouldAbortFn aborted the release.
SlabReleaseContext AllocationClass::startSlabRelease(
    SlabReleaseMode mode, const void* hint, SlabReleaseAbortFn shouldAbortFn) {
  using LockHolder = std::unique_lock<std::mutex>;
  // Serialize concurrent startSlabRelease calls against each other.
  LockHolder startSlabReleaseLockHolder(startSlabReleaseLock_);

  const auto* hintSlab = slabAlloc_.getSlabForMemory(hint);
  if (hint != nullptr && !slabAlloc_.isValidSlab(hintSlab)) {
    throw std::invalid_argument(
        folly::sformat("Invalid hint {} for slab release {}", hint, hintSlab));
  }

  const Slab* slab;
  SlabHeader* header;
  {
    std::unique_lock<folly::DistributedMutex> l(*lock_);
    // if a hint is provided, use it. If not, try to get a free/allocated slab.
    slab = hint == nullptr ? getSlabForReleaseLocked() : hintSlab;
    if (slab == nullptr) {
      throw std::invalid_argument("Can not figure out a slab for release");
    }

    header = slabAlloc_.getSlabHeader(slab);
    // slab header must be valid, belong to this pool/class, and NOT already be
    // marked for release by another in-flight release.
    if (header == nullptr || header->classId != getId() ||
        header->poolId != getPoolId() || header->isMarkedForRelease()) {
      throw std::invalid_argument(folly::sformat(
          "Slab Header {} is in invalid state for release. id = {}, "
          "markedForRelease = {}, classId = {}",
          header, header == nullptr ? Slab::kInvalidClassId : header->classId,
          header == nullptr ? false : header->isMarkedForRelease(), getId()));
    }

    // if its is a free slab, get it off the freeSlabs_ and return context.
    // Such a slab is fully released right away -- no active allocations.
    auto freeIt = std::find(freeSlabs_.begin(), freeSlabs_.end(), slab);
    if (freeIt != freeSlabs_.end()) {
      // swap-with-back removal; order of freeSlabs_ does not matter.
      *freeIt = freeSlabs_.back();
      freeSlabs_.pop_back();
      header->classId = Slab::kInvalidClassId;
      header->allocSize = 0;
      return SlabReleaseContext{slab, header->poolId, header->classId, mode};
    }

    // The slab is actively used, so we create a new release alloc map
    // and mark the slab for release
    header->setMarkedForRelease(true);
    createSlabReleaseAllocMapLocked(slab);

    // remove this slab from the allocatedSlab_ if it exists.
    auto allocIt =
        std::find(allocatedSlabs_.begin(), allocatedSlabs_.end(), slab);
    if (allocIt == allocatedSlabs_.end()) {
      // not a part of free slabs and not part of allocated slab. This is an
      // error, return to caller. This should not happen. throw a run time
      // error.
      throw std::runtime_error(
          folly::sformat("Slab {} belongs to class {}. But its not present in "
                         "the free list or "
                         "allocated list.",
                         slab, getId()));
    }
    *allocIt = allocatedSlabs_.back();
    allocatedSlabs_.pop_back();

    // if slab is being carved currently, then update slabReleaseAllocMap
    // allocState with free Allocs info, and then reset it. Allocations past
    // currOffset_ were never handed out, so mark them free in the map.
    if (currSlab_ == slab) {
      const auto it = slabReleaseAllocMap_.find(getSlabPtrValue(slab));
      auto& allocState = it->second;
      XDCHECK_EQ(allocState.size(), getAllocsPerSlab());
      for (size_t i = currOffset_ / allocationSize_; i < allocState.size();
           i++) {
        allocState[i] = true;
      }
      currSlab_ = nullptr;
      currOffset_ = 0;
    }
  } // alloc lock scope

  // Walk the free list and pull out allocations that live in this slab. Done
  // outside the critical section above; shouldAbortFn can cancel mid-way.
  auto results = pruneFreeAllocs(slab, shouldAbortFn);
  if (results.first) {
    // Release was aborted: roll back the state we mutated above. The slab was
    // removed from allocatedSlabs_, so it must be put back -- otherwise it
    // would belong to neither list and any future release of it would throw
    // the runtime_error above.
    lock_->lock_combine([&]() {
      header->setMarkedForRelease(false);
      allocatedSlabs_.push_back(const_cast<Slab*>(slab));
      slabReleaseAllocMap_.erase(getSlabPtrValue(slab));
    });
    throw exception::SlabReleaseAborted(
        folly::sformat("Slab Release aborted "
                       "during pruning free allocs. Slab address: {}",
                       slab));
  }

  std::vector<void*> activeAllocations = std::move(results.second);
  return lock_->lock_combine([&]() {
    if (activeAllocations.empty()) {
      header->classId = Slab::kInvalidClassId;
      header->allocSize = 0;
      header->setMarkedForRelease(false);
      // no active allocations to be freed back. We can consider this slab as
      // released from this AllocationClass. This means we also do not need
      // to keep the slabFreeState
      slabReleaseAllocMap_.erase(getSlabPtrValue(slab));
      return SlabReleaseContext{slab, header->poolId, header->classId, mode};
    } else {
      // Caller must free these before the release can complete; track the
      // in-flight release count until then.
      ++activeReleases_;
      return SlabReleaseContext{slab, header->poolId, header->classId,
                                std::move(activeAllocations), mode};
    }
  });
}