in src/mem/corealloc.h [352:392]
SNMALLOC_SLOW_PATH void dealloc_local_slabs(smallsizeclass_t sizeclass)
{
// Return unused slabs of this sizeclass back to the global allocator.
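// The filter predicate below returns true for entries that should be
// dropped from the available list, i.e. slabs that were fully free and
// have been handed back as chunks.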
alloc_classes[sizeclass].available.filter([this,
sizeclass](Metaslab* meta) {
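// Domesticate (validate) possibly wild free-list pointers before they
// are dereferenced; when SNMALLOC_TRACING is enabled, log any pointer
// that was rewritten by domestication.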
auto domesticate =
[this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
auto res =
capptr_domesticate<SharedStateHandle>(backend_state_ptr(), p);
#ifdef SNMALLOC_TRACING
if (res.unsafe_ptr() != p.unsafe_ptr())
printf(
"Domesticated %p to %p!\n", p.unsafe_ptr(), res.unsafe_ptr());
#endif
return res;
};
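// A non-zero needed() means this slab still has outstanding
// allocations, so it must be kept; with client checks enabled, its
// free queue is validated before it is left in place.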
if (meta->needed() != 0)
{
if (check_slabs)
{
meta->free_queue.validate(entropy.get_free_list_key(), domesticate);
}
return false;
}
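// The slab is completely free: drop it from this sizeclass's length
// and unused counts before returning its chunk.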
alloc_classes[sizeclass].length--;
alloc_classes[sizeclass].unused--;
// TODO: delay the clear to the next user of the slab, or to teardown,
// so that we do not touch these cache lines at this point in
// snmalloc_check_client builds.
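// clear_slab detaches the slab's metadata and yields the underlying
// chunk record, which is returned to the chunk allocator at the slab
// sizeclass that backs this object sizeclass.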
auto chunk_record = clear_slab(meta, sizeclass);
ChunkAllocator::dealloc<SharedStateHandle>(
get_backend_local_state(),
chunk_local_state,
chunk_record,
sizeclass_to_slab_sizeclass(sizeclass));
return true;
});
}
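
For context, releasing every cached slab (for example when an allocator is flushed or torn down) amounts to invoking this routine once per small sizeclass. A minimal caller sketch follows; it assumes a bound such as NUM_SMALL_SIZECLASSES from the sizeclass table, and the actual call sites in corealloc.h may differ:

SNMALLOC_SLOW_PATH void dealloc_local_slabs()
{
  // Sketch: drain the cached slabs of every small sizeclass in turn.
  for (smallsizeclass_t sizeclass = 0; sizeclass < NUM_SMALL_SIZECLASSES;
       sizeclass++)
  {
    dealloc_local_slabs(sizeclass);
  }
}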