in src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java [525:683]
public StatsMetadata deserialize(Version version, DataInputPlus in) throws IOException
{
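    // Fields must be read in exactly the order the matching serialize() writes them; the version
    // checks below gate fields that only exist in newer sstable formats.
    // Per-partition size histogram. An overflowed top bucket is cleared (with a warning) so that
    // mean and percentile calculations remain usable, at degraded accuracy.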
    EstimatedHistogram partitionSizes = EstimatedHistogram.serializer.deserialize(in);
    if (partitionSizes.isOverflowed())
    {
        logger.warn("Deserialized partition size histogram with {} values greater than the maximum of {}. " +
                    "Clearing the overflow bucket to allow for degraded mean and percentile calculations...",
                    partitionSizes.overflowCount(), partitionSizes.getLargestBucketOffset());
        partitionSizes.clearOverflow();
    }
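    // Per-partition cell count histogram, with the same overflow handling as the size histogram above.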
    EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(in);
    if (columnCounts.isOverflowed())
    {
        logger.warn("Deserialized partition cell count histogram with {} values greater than the maximum of {}. " +
                    "Clearing the overflow bucket to allow for degraded mean and percentile calculations...",
                    columnCounts.overflowCount(), columnCounts.getLargestBucketOffset());
        columnCounts.clearOverflow();
    }
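    // Only the commit log upper bound is stored at this point in the layout; the lower bound is
    // read further down, and only for versions where hasCommitLogLowerBound() is true.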
    CommitLogPosition commitLogLowerBound = CommitLogPosition.NONE, commitLogUpperBound;
    commitLogUpperBound = CommitLogPosition.serializer.deserialize(in);
    long minTimestamp = in.readLong();
    long maxTimestamp = in.readLong();
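    // Local deletion times: newer formats store them as unsigned 32-bit values widened to long;
    // older formats store signed ints and used Integer.MAX_VALUE as the "no deletion" sentinel,
    // which is translated to Cell.NO_DELETION_TIME here.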
    long minLocalDeletionTime;
    long maxLocalDeletionTime;
    if (version.hasUIntDeletionTime())
    {
        minLocalDeletionTime = Cell.deletionTimeUnsignedIntegerToLong(in.readInt());
        maxLocalDeletionTime = Cell.deletionTimeUnsignedIntegerToLong(in.readInt());
    }
    else
    {
        minLocalDeletionTime = in.readInt();
        if (minLocalDeletionTime == Integer.MAX_VALUE)
            minLocalDeletionTime = Cell.NO_DELETION_TIME;
        maxLocalDeletionTime = in.readInt();
        if (maxLocalDeletionTime == Integer.MAX_VALUE)
            maxLocalDeletionTime = Cell.NO_DELETION_TIME;
    }
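    // Fixed-width scalar stats, present in all supported versions.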
    int minTTL = in.readInt();
    int maxTTL = in.readInt();
    double compressionRatio = in.readDouble();
    TombstoneHistogram tombstoneHistogram = TombstoneHistogram.getSerializer(version).deserialize(in);
    int sstableLevel = in.readInt();
    long repairedAt = in.readLong();
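    // Covered clustering range. Legacy formats store raw per-column min/max clustering values
    // (only trusted when hasAccurateMinMax(), see CASSANDRA-14861); newer formats serialize the
    // clustering types and a covering Slice directly.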
    List<AbstractType<?>> clusteringTypes = null;
    Slice coveredClustering = Slice.ALL;
    if (version.hasLegacyMinMax())
    {
        // We always deserialize the min/max clustering values if they are there, but we ignore them for
        // legacy sstables where !hasAccurateMinMax due to CASSANDRA-14861.
        int colCount = in.readInt();
        ByteBuffer[] minClusteringValues = new ByteBuffer[colCount];
        for (int i = 0; i < colCount; i++)
            minClusteringValues[i] = ByteBufferUtil.readWithShortLength(in);
        colCount = in.readInt();
        ByteBuffer[] maxClusteringValues = new ByteBuffer[colCount];
        for (int i = 0; i < colCount; i++)
            maxClusteringValues[i] = ByteBufferUtil.readWithShortLength(in);
        if (version.hasAccurateMinMax())
            coveredClustering = Slice.make(BufferClusteringBound.inclusiveStartOf(minClusteringValues),
                                           BufferClusteringBound.inclusiveEndOf(maxClusteringValues));
    }
    else if (version.hasImprovedMinMax())
    {
        // Once legacyMinMax is removed from the format, the improved min/max is serialized at this position.
        clusteringTypes = typeSerializer.deserializeList(in);
        coveredClustering = Slice.serializer.deserialize(in, version.correspondingMessagingVersion(), clusteringTypes);
    }
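    // Whether legacy counter shards are present, plus total cells set and total rows.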
    boolean hasLegacyCounterShards = in.readBoolean();
    long totalColumnsSet = in.readLong();
    long totalRows = in.readLong();
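    // Commit log coverage: newer formats persist the full interval set; otherwise a single
    // interval is reconstructed from the lower/upper bounds read above.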
    if (version.hasCommitLogLowerBound())
        commitLogLowerBound = CommitLogPosition.serializer.deserialize(in);
    IntervalSet<CommitLogPosition> commitLogIntervals;
    if (version.hasCommitLogIntervals())
        commitLogIntervals = commitLogPositionSetSerializer.deserialize(in);
    else
        commitLogIntervals = new IntervalSet<>(commitLogLowerBound, commitLogUpperBound);
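    // Optional pending repair session id, preceded by a one-byte presence marker.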
    TimeUUID pendingRepair = null;
    if (version.hasPendingRepair() && in.readByte() != 0)
    {
        pendingRepair = TimeUUID.deserialize(in);
    }
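    // isTransient defaults to false for formats that predate it; the originating host id is
    // optional and, like pendingRepair, guarded by a presence byte.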
    boolean isTransient = version.hasIsTransient() && in.readBoolean();
    UUID originatingHostId = null;
    if (version.hasOriginatingHostId() && in.readByte() != 0)
        originatingHostId = UUIDSerializer.serializer.deserialize(in, 0);
    // If not recorded, the only time we can guarantee there is no partition level deletion is if there is no
    // deletion at all. Otherwise, we have to assume there may be some.
    boolean hasPartitionLevelDeletions = minLocalDeletionTime != Cell.NO_DELETION_TIME;
    if (version.hasPartitionLevelDeletionsPresenceMarker())
    {
        hasPartitionLevelDeletions = in.readBoolean();
    }
    if (version.hasImprovedMinMax() && version.hasLegacyMinMax())
    {
        // While legacyMinMax is still part of the format, the improved min/max is serialized at this position.
        clusteringTypes = typeSerializer.deserializeList(in);
        coveredClustering = Slice.serializer.deserialize(in, version.correspondingMessagingVersion(), clusteringTypes);
    }
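    // Newer formats persist the first and last partition keys with vint-prefixed lengths.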
    ByteBuffer firstKey = null;
    ByteBuffer lastKey = null;
    if (version.hasKeyRange())
    {
        firstKey = ByteBufferUtil.readWithVIntLength(in);
        lastKey = ByteBufferUtil.readWithVIntLength(in);
    }
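    // Token space coverage stays NaN for formats that do not record it.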
    double tokenSpaceCoverage = Double.NaN;
    if (version.hasTokenSpaceCoverage())
    {
        tokenSpaceCoverage = in.readDouble();
    }
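    // Assemble the StatsMetadata from the values read above.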
    return new StatsMetadata(partitionSizes,
                             columnCounts,
                             commitLogIntervals,
                             minTimestamp,
                             maxTimestamp,
                             minLocalDeletionTime,
                             maxLocalDeletionTime,
                             minTTL,
                             maxTTL,
                             compressionRatio,
                             tombstoneHistogram,
                             sstableLevel,
                             clusteringTypes,
                             coveredClustering,
                             hasLegacyCounterShards,
                             repairedAt,
                             totalColumnsSet,
                             totalRows,
                             tokenSpaceCoverage,
                             originatingHostId,
                             pendingRepair,
                             isTransient,
                             hasPartitionLevelDeletions,
                             firstKey,
                             lastKey);
}