in src/mem/corealloc.h [831:876]
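/**
 * Flush the attached local cache back to this core allocator and
 * process any pending remote deallocations.
 *
 * If `destroy_queue` is set, the message queue is torn down and its
 * remaining contents are drained (used when the allocator itself is
 * being destroyed); otherwise the queue is processed in place.
 *
 * Returns true if the flush posted messages to other allocators'
 * remote queues.
 */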
bool flush(bool destroy_queue = false)
{
SNMALLOC_ASSERT(attached_cache != nullptr);
auto local_state = backend_state_ptr();
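// Domesticate (validate) wild pointers read from the message queue
// before they are dereferenced.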
auto domesticate =
[local_state](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<SharedStateHandle>(local_state, p);
};
if (destroy_queue)
{
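// Take ownership of the queue's remaining contents and deallocate
// each element in turn.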
auto p_wild = message_queue().destroy();
auto p_tame = domesticate(p_wild);
while (p_tame != nullptr)
{
bool need_post = true; // We always post later, so this flag is ignored.
auto n_tame = p_tame->atomic_read_next(key_global, domesticate);
auto& entry = SharedStateHandle::Pagemap::get_metaentry(
backend_state_ptr(), snmalloc::address_cast(p_tame));
handle_dealloc_remote(entry, p_tame.as_void(), need_post);
p_tame = n_tame;
}
}
else
{
// Process the incoming message queue.  Loop, as a single call to
// handle_message_queue normally only processes one batch of messages.
while (has_messages())
handle_message_queue([]() {});
}
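// Flush the attached local cache: locally owned objects are freed via
// dealloc_local_object, and objects destined for other allocators are
// posted to their remote queues; `posted` records whether any posts
// were made.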
auto posted =
attached_cache->flush<sizeof(CoreAllocator), SharedStateHandle>(
backend_state_ptr(),
[&](capptr::Alloc<void> p) { dealloc_local_object(p); });
// We may now have unused slabs; return them to the global allocator.
for (smallsizeclass_t sizeclass = 0; sizeclass < NUM_SMALL_SIZECLASSES;
sizeclass++)
{
dealloc_local_slabs<true>(sizeclass);
}
return posted;
}