func Collapse()

in processor/lsmintervalprocessor/internal/data/expo/scale.go [94:137]


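// Collapse merges each pair of adjacent buckets, so the histogram represents
// the same observations as if they had been recorded at scale-1. The
// now-unused tail of the counts slice is zeroed rather than shrunk.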
func Collapse(bs Buckets) {
	counts := bs.BucketCounts()
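	// the merged form needs len/2 buckets, plus one when the length is odd or
	// an odd offset leaves a lone half-pair at the front.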
	size := counts.Len() / 2
	if counts.Len()%2 != 0 || bs.Offset()%2 != 0 {
		size++
	}

	// merging needs to happen in pairs aligned to absolute bucket index 0. if
	// the offset is odd, shift the whole merge by one so that alignment holds.
	shift := 0
	if bs.Offset()%2 != 0 {
		bs.SetOffset(bs.Offset() - 1)
		shift--
	}
	bs.SetOffset(bs.Offset() / 2)
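	// e.g. an original offset of 3 gives shift = -1 and a new offset of 1.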

	for i := 0; i < size; i++ {
		// size is ~half of len. each iteration sums two old buckets into one.
		// k jumps in steps of 2, shifted if the offset makes this necessary.
		k := i*2 + shift

		// special case: we just started and had to shift. the left half of the
		// new bucket is not actually stored, so only use counts[0].
		if i == 0 && k == -1 {
			counts.SetAt(i, counts.At(k+1))
			continue
		}

		// new[i] = old[k]+old[k+1]
		counts.SetAt(i, counts.At(k))
		if k+1 < counts.Len() {
			counts.SetAt(i, counts.At(k)+counts.At(k+1))
		}
	}

	// zero the excess area. it's not needed to represent the observation
	// anymore, but is kept for two reasons:
	// 1. future observations may need it; no need to re-alloc then if kept
	// 2. [pcommon.UInt64Slice] cannot, in fact, be sliced, so getting rid
	//    of it would alloc ¯\_(ツ)_/¯
	for i := size; i < counts.Len(); i++ {
		counts.SetAt(i, 0)
	}
}
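
Usage sketch (not part of the file above): a minimal example of driving Collapse from a pdata exponential histogram. It assumes that expo.Buckets is an alias for pmetric.ExponentialHistogramDataPointBuckets, as in the upstream expo package, and that Collapse is in scope; lowering the data point's scale remains the caller's responsibility.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewExponentialHistogramDataPoint()
	dp.SetScale(4)

	pos := dp.Positive()
	pos.SetOffset(3) // odd offset: the merge is shifted by one
	pos.BucketCounts().FromRaw([]uint64{1, 1, 1, 1, 1, 1})

	// merge adjacent bucket pairs; the counts now read as if recorded at scale-1
	Collapse(pos)
	dp.SetScale(dp.Scale() - 1)

	fmt.Println(pos.Offset())               // 1
	fmt.Println(pos.BucketCounts().AsRaw()) // [1 2 2 1 0 0]
}

With offset 3 and six one-count buckets, the first merged bucket keeps only its stored right half (count 1), the next two pairs sum to 2 each, the last old bucket has no stored partner and stays 1, and the unused tail is zeroed rather than shrunk.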