SNMALLOC_SLOW_PATH void dealloc_local_object_slow()

in src/mem/corealloc.h [400:465]


    SNMALLOC_SLOW_PATH void dealloc_local_object_slow(const MetaEntry& entry)
    {
      // TODO: Handle message queue on this path?

      Metaslab* meta = entry.get_metaslab();

      if (meta->is_large())
      {
        // Handle large deallocation here.
        size_t entry_sizeclass = entry.get_sizeclass().as_large();
        size_t size = bits::one_at_bit(entry_sizeclass);
        size_t slab_sizeclass =
          metaentry_chunk_sizeclass_to_slab_sizeclass(entry_sizeclass);

#ifdef SNMALLOC_TRACING
        std::cout << "Large deallocation: " << size
                  << " chunk sizeclass: " << slab_sizeclass << std::endl;
#else
        UNUSED(size);
#endif

        auto slab_record = reinterpret_cast<ChunkRecord*>(meta);
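        // The metaslab's storage is reused as the ChunkRecord that is handed
        // back to the ChunkAllocator below.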

        ChunkAllocator::dealloc<SharedStateHandle>(
          get_backend_local_state(),
          chunk_local_state,
          slab_record,
          slab_sizeclass);

        return;
      }

      smallsizeclass_t sizeclass = entry.get_sizeclass().as_small();

      UNUSED(entropy);
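      // A sleeping slab is not on the available list for its sizeclass, so it
      // cannot currently be used for allocation.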
      if (meta->is_sleeping())
      {
        // Slab has been woken up, so add it to the list of slabs with free
        // space.

        // Wake the slab up.
        meta->set_not_sleeping(sizeclass);

        alloc_classes[sizeclass].available.insert(meta);
        alloc_classes[sizeclass].length++;

#ifdef SNMALLOC_TRACING
        std::cout << "Slab is woken up" << std::endl;
#endif

        ticker.check_tick();
        return;
      }

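      // The slab was already awake; count it as unused so that surplus slabs
      // for this sizeclass can be returned to the global pool below.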
      alloc_classes[sizeclass].unused++;

      // If we have several unused slabs, and they make up a large enough
      // proportion of the slabs we hold, return them to the global pool.
      if (
        (alloc_classes[sizeclass].unused > 2) &&
        (alloc_classes[sizeclass].unused >
         (alloc_classes[sizeclass].length >> 2)))
      {
        dealloc_local_slabs(sizeclass);
      }
      ticker.check_tick();
    }
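
The trimming heuristic at the end only returns slabs once more than two are unused and the unused slabs exceed a quarter of the slabs held for that sizeclass. A minimal standalone sketch (the helper name and the counts are hypothetical, not part of snmalloc) shows how that same condition behaves for a few values:

    #include <cstddef>
    #include <iostream>

    // Hypothetical helper mirroring the condition in dealloc_local_object_slow:
    // trim once more than two slabs are unused and the unused slabs exceed a
    // quarter of the slabs held for the sizeclass.
    bool should_return_slabs(std::size_t unused, std::size_t length)
    {
      return (unused > 2) && (unused > (length >> 2));
    }

    int main()
    {
      std::cout << should_return_slabs(4, 16) << std::endl; // 0: 4 is not > 16/4
      std::cout << should_return_slabs(5, 16) << std::endl; // 1: more than a quarter unused
      std::cout << should_return_slabs(3, 4) << std::endl;  // 1: 3 > 2 and 3 > 1
      std::cout << should_return_slabs(2, 4) << std::endl;  // 0: the "> 2" guard avoids churn
    }

The absolute "> 2" guard keeps a couple of spare slabs per sizeclass even when few slabs are held, while the proportional test bounds how many unused slabs are retained relative to the slabs held.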