in sparseconvnet/SCN/Metadata/sparsehash/internal/densehashtable.h [571:617]
bool resize_delta(size_type delta) {
  bool did_resize = false;
  if ( settings.consider_shrink() ) {  // see if lots of deletes happened
    if ( maybe_shrink() )
      did_resize = true;
  }
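  // Guard against size_type overflow before computing num_elements + delta
  // below.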
  if (num_elements >=
      (std::numeric_limits<size_type>::max)() - delta) {
    throw std::length_error("resize overflow");
  }
  if ( bucket_count() >= HT_MIN_BUCKETS &&
       (num_elements + delta) <= settings.enlarge_threshold() )
    return did_resize;   // we're ok as we are

  // Sometimes, we need to resize just to get rid of all the
  // "deleted" buckets that are clogging up the hashtable. So when
  // deciding whether to resize, count the deleted buckets (which
  // are currently taking up room). But later, when we decide what
  // size to resize to, *don't* count deleted buckets, since they
  // get discarded during the resize.
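  // (num_elements still counts buckets that are marked deleted, so
  // needed_size accounts for the room they currently occupy.)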
  const size_type needed_size = settings.min_buckets(num_elements + delta, 0);
  if ( needed_size <= bucket_count() )   // we have enough buckets
    return did_resize;

  size_type resize_to =
      settings.min_buckets(num_elements - num_deleted + delta, bucket_count());
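  // Passing bucket_count() as the floor means this branch never shrinks the
  // table; shrinking is handled by maybe_shrink() above.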
  if (resize_to < needed_size &&   // may double resize_to
      resize_to < (std::numeric_limits<size_type>::max)() / 2) {
    // This situation means that we have enough deleted elements that,
    // once we purge them, we won't actually have needed to grow. But
    // we may want to grow anyway: if we just purge one element, say,
    // we'll have to grow anyway next time we insert. Might as well
    // grow now, since we're already going through the trouble of
    // copying (in order to purge the deleted elements).
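    // shrink_size(b) is the element count at which a table of b buckets
    // would be considered for shrinking (roughly b * shrink_factor).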
    const size_type target =
        static_cast<size_type>(settings.shrink_size(resize_to*2));
    if (num_elements - num_deleted + delta >= target) {
      // Good, we won't be below the shrink threshold even if we double.
      resize_to *= 2;
    }
  }
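  // Rehash by copy-constructing into a temporary table with resize_to
  // buckets and swapping it in; deleted entries are dropped by the copy.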
  dense_hashtable tmp(*this, resize_to);
  swap(tmp);   // now we are tmp
  return true;
}
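
For context, a minimal sketch of how a caller typically uses resize_delta: reserve room for one element, then insert it. The insert / insert_noresize names and signatures below are approximations of the surrounding class, not verified against this revision of the file.

// Minimal usage sketch (assumed caller, not necessarily the exact code in
// this file): grow first, then insert without re-checking capacity.
std::pair<iterator, bool> insert(const_reference obj) {
  resize_delta(1);              // make room for one more element if needed
  return insert_noresize(obj);  // assumed helper that inserts without resizing
}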