std::set<uint32_t> MemoryAllocator::generateAllocSizes()

in cachelib/allocator/memory/MemoryAllocator.cpp [191:267]


std::set<uint32_t> MemoryAllocator::generateAllocSizes(
    double factor,
    uint32_t maxSize,
    uint32_t minSize,
    bool reduceFragmentation) {
  if (maxSize > Slab::kSize) {
    throw std::invalid_argument(
        folly::sformat("maximum alloc size {} is more than the slab size {}",
                       maxSize, Slab::kSize));
  }

  if (factor <= 1.0) {
    throw std::invalid_argument(folly::sformat("invalid factor {}", factor));
  }
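
  // For example, a factor of 1.0 (no growth between size classes) or a
  // maxSize larger than one slab is rejected up front.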

  // Returns the next chunk size. Uses the previous size and the factor to pick
  // a size large enough to change (decrease) the number of chunks per slab by
  // at least one, then grows the chunk size to the maximum that maintains the
  // new number of chunks per slab, reducing slab wastage (for example: with
  // 1 MB slabs, a 300 KB chunk can be upgraded to ~333 KB while still fitting
  // 3 chunks per slab).
  auto nextSize = [=](uint32_t prevSize, double incFactor) {
    // Increment by incFactor until we have a new number of chunks per slab.
    uint32_t newSize = prevSize;
    do {
      auto tmpPrevSize = newSize;
      newSize = util::getAlignedSize(static_cast<uint32_t>(newSize * incFactor),
                                     kAlignment);
      if (newSize == tmpPrevSize) {
        throw std::invalid_argument(
            folly::sformat("invalid incFactor {}", incFactor));
      }

      if (newSize > Slab::kSize) {
        return newSize;
      }
    } while (Slab::kSize / newSize == Slab::kSize / prevSize);
    // Now make sure we're selecting the maximum chunk size while maintaining
    // the number of chunks per slab.
    const uint32_t perSlab = static_cast<uint32_t>(Slab::kSize) / newSize;
    XDCHECK_GT(perSlab, 0ULL);
    const uint32_t maxChunkSize = static_cast<uint32_t>(Slab::kSize) / perSlab;
    // Align down to kAlignment while maintaining perSlab chunks per slab.
    newSize = maxChunkSize - maxChunkSize % kAlignment;
    XDCHECK_EQ(newSize % kAlignment, 0ULL);
    XDCHECK_EQ(static_cast<uint32_t>(Slab::kSize) / newSize, perSlab);
    return newSize;
  };
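
  // Worked trace of nextSize (hypothetical numbers, using the 1 MB slab from
  // the comment above and 8-byte alignment): nextSize(307200 /* 300 KB */, 1.08)
  // grows 300 KB -> 324 KB (still 3 chunks per slab), then -> ~350 KB, which
  // drops the count to 2 chunks per slab. The tail of the lambda then expands
  // the size to 512 KB, the largest aligned size that still fits 2 chunks per
  // slab, so the slab is used with no leftover tail.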

  std::set<uint32_t> allocSizes;

  auto size = minSize;
  while (size < maxSize) {
    const auto nPerSlab = Slab::kSize / size;
    // If we cannot fit more than one alloc per slab, stop subdividing; the
    // aligned max alloc size inserted after the loop covers this range.
    if (nPerSlab <= 1) {
      break;
    }
    allocSizes.insert(size);

    if (reduceFragmentation) {
      size = nextSize(size, factor);
    } else {
      auto prevSize = size;
      size = util::getAlignedSize(static_cast<uint32_t>(size * factor),
                                  kAlignment);
      if (prevSize == size) {
        throw std::invalid_argument(
            folly::sformat("invalid factor {}", factor));
      }
    }
  }

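  // Always include the aligned maximum size so the largest allocations remain
  // servable even when the loop above exits early.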
  allocSizes.insert(util::getAlignedSize(maxSize, kAlignment));
  return allocSizes;
}
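
For a feel for the output, here is a minimal standalone sketch of the simple
(reduceFragmentation == false) path. The 4 MB slab size, the 8-byte alignment,
and the alignUp helper are assumed stand-ins for Slab::kSize,
MemoryAllocator::kAlignment, and util::getAlignedSize, not values taken from
this file.

#include <cstdint>
#include <iostream>
#include <set>

// Stand-ins for Slab::kSize and MemoryAllocator::kAlignment (assumed values).
constexpr uint32_t kSlabSize = 4 * 1024 * 1024;
constexpr uint32_t kAlign = 8;

// Round size up to the next multiple of alignment, like util::getAlignedSize.
uint32_t alignUp(uint32_t size, uint32_t alignment) {
  return (size + alignment - 1) / alignment * alignment;
}

int main() {
  // Mirror the simple geometric path: grow each class by `factor` and align.
  const double factor = 1.25;
  const uint32_t minSize = 64;
  const uint32_t maxSize = 1024 * 1024;

  std::set<uint32_t> sizes;
  uint32_t size = minSize;
  while (size < maxSize && kSlabSize / size > 1) {
    sizes.insert(size);
    size = alignUp(static_cast<uint32_t>(size * factor), kAlign);
  }
  sizes.insert(alignUp(maxSize, kAlign));

  for (uint32_t s : sizes) {
    std::cout << s << " (" << kSlabSize / s << " per slab)\n";
  }
}

With these parameters the series starts 64, 80, 100, 128, 160, 200, 256, ...
and ends with the aligned maxSize; the reduceFragmentation path instead snaps
each step to the largest size that preserves the chunks-per-slab count, as
nextSize above does.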