private long removeAt()

in protonj2/src/main/java/org/apache/qpid/protonj2/engine/util/UnsettledMap.java [489:558]


    /**
     * Removes the entry at the given bucket location and computes where the entry that
     * logically followed the removed one now lives, accounting for any bucket compaction
     * or recycling triggered by the removal.
     * <p>
     * The return value packs the location of the next entry into a single {@code long}:
     * the high 32 bits carry the bucket index and the low 32 bits carry the entry offset
     * within that bucket. A value of -1 indicates there is no following entry.
     *
     * @param bucketIndex the index into the buckets array of the bucket to remove from.
     * @param bucketEntry the offset within that bucket of the entry to remove.
     *
     * @return the packed (bucket index, entry offset) location of the next entry, or -1 if none.
     *
     * @throws IndexOutOfBoundsException if the given location lies outside the tracked bounds.
     */
    private long removeAt(int bucketIndex, int bucketEntry) {
        if (bucketIndex >= buckets.length || bucketEntry >= UNSETTLED_BUCKET_SIZE) {
            throw new IndexOutOfBoundsException(String.format(
                "Cannot remove an entry from segment %d at index %d which is outside the tracked bounds", bucketIndex, bucketEntry));
        }

        final UnsettledBucket<Delivery> bucket = buckets[bucketIndex];

        bucket.removeAt(bucketEntry);
        totalEntries--;
        modCount++;  // Removal is a structural modification; fail-fast iterators observe this.

        // Default of -1 signals "no next entry" to the caller.
        long result = -1;

        if (bucket.isReadable()) {
            final int nextBucketIndex = bucketIndex + 1;
            final int prevBucketIndex = bucketIndex - 1;

            // Select the neighboring buckets as compaction targets. ALWAYS_FULL_BUCKET is a
            // sentinel that stands in when a neighbor is unusable (no next bucket beyond
            // 'current', or the neighbor's delivery ID range is not contiguous with this
            // bucket's) — presumably it reports zero free space so no entries are moved into
            // that side (NOTE(review): confirm against the ALWAYS_FULL_BUCKET definition).
            final UnsettledBucket<Delivery> nextBucket =
                bucketIndex == current || Integer.compareUnsigned(bucket.highestDeliveryId, buckets[nextBucketIndex].lowestDeliveryId) > 0 ?
                    ALWAYS_FULL_BUCKET : buckets[nextBucketIndex];
            final UnsettledBucket<Delivery> prevBucket =
                bucketIndex == 0 || Integer.compareUnsigned(bucket.lowestDeliveryId, buckets[prevBucketIndex].highestDeliveryId) < 0 ?
                    ALWAYS_FULL_BUCKET : buckets[prevBucketIndex];

            // As soon as compaction is possible move elements from this bucket into previous and next
            // buckets, which reduces search times as there are fewer buckets to traverse.
            if (nextBucket.getFreeSpace() + prevBucket.getFreeSpace() >= bucket.entries) {
                // Entries that fit in the previous bucket are copied backward; the remainder
                // goes forward into the next bucket.
                final int toCopyBackward = Math.min(prevBucket.getFreeSpace(), bucket.entries);
                // Offset of the entry after the removed one, relative to the split point
                // between the backward-copied and forward-copied portions of this bucket.
                final int nextEntryOffset = ++bucketEntry - (bucket.readOffset + toCopyBackward);

                if (nextEntryOffset < 0) {
                    // The next entry was moved into the previous bucket; the negative offset
                    // added to the previous bucket's write offset locates it there.
                    result = (long) (prevBucketIndex) << 32;
                    result |= prevBucket.writeOffset + nextEntryOffset;
                } else if (nextBucket.entries + (bucket.entries - toCopyBackward) > 0) {
                    // The next entry was moved into the next bucket. After this bucket is
                    // recycled the next bucket shifts down into bucketIndex, so bucketIndex is
                    // intentionally used here (not nextBucketIndex). The offset is the raw
                    // index so long as compaction packs entries down to offset zero
                    // (otherwise it would be relative to the read offset).
                    result = (long) bucketIndex << 32;
                    result |= nextEntryOffset;
                }

                doCompaction(bucket, prevBucket, nextBucket);
                recycleBucket(bucketIndex);
            } else {
                // If the element removed is not the last in this bucket then the next is just
                // the next element, otherwise it is the first element of the next bucket if it
                // is non-empty, otherwise we have reached the end of the elements.
                if (++bucketEntry < bucket.writeOffset) {
                    result = (long) bucketIndex << 32;
                    result |= bucketEntry;
                } else if (nextBucketIndex <= current && buckets[nextBucketIndex].entries > 0) {
                    result = (long) nextBucketIndex << 32;
                    result |= buckets[nextBucketIndex].readOffset;
                }
            }
        } else {
            recycleBucket(bucketIndex);

            // The bucket was empty and will be recycled shifting down all buckets following it and if
            // there is a next non-empty bucket then the next entry is the first entry in that bucket
            // (which, after the shift, now occupies this same bucketIndex).
            if (bucketIndex <= current && buckets[bucketIndex].entries > 0) {
                result = (long) bucketIndex << 32;
                result |= buckets[bucketIndex].readOffset;
            }
        }

        return result;
    }