in src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java [316:428]
/**
 * Prints every piece of metadata recorded for a single SSTable: validation,
 * stats, compaction and serialization-header components, plus (optionally)
 * an overview obtained by scanning the data file itself.
 *
 * @param file the SSTable data/component file to inspect
 * @param scan if true (and the SSTable format is "ma" or newer), also scan
 *             the SSTable to print a partition-level overview
 * @throws IOException if any of the metadata components cannot be read
 */
private void printSStableMetadata(File file, boolean scan) throws IOException
{
    Descriptor descriptor = Descriptor.fromFileWithComponent(file, false).left;
    StatsComponent statsComponent = StatsComponent.load(descriptor);
    ValidationMetadata validation = statsComponent.validationMetadata();
    StatsMetadata stats = statsComponent.statsMetadata();
    CompactionMetadata compaction = statsComponent.compactionMetadata();
    SerializationHeader.Component header = statsComponent.serializationHeader();

    // Compression info is a separate component; open it only long enough to
    // learn the compressor class, then close it (it holds off-heap resources).
    Class<? extends ICompressor> compressorClass = null;
    try (CompressionMetadata compression = CompressionInfoComponent.loadIfExists(descriptor))
    {
        compressorClass = compression != null ? compression.compressor().getClass() : null;
    }

    // Cell timestamps in SSTable metadata are microseconds since the epoch.
    // Declared at method scope because it is needed both for the StatsMetadata
    // section and the EncodingStats section below (fixes an out-of-scope use).
    TimeUnit tsUnit = TimeUnit.MICROSECONDS;

    field("SSTable", descriptor);
    // The scanned overview relies on per-partition index information that only
    // exists for format version "ma" (3.0) and later.
    if (scan && descriptor.version.version.compareTo("ma") >= 0)
    {
        printScannedOverview(descriptor, stats);
    }
    if (validation != null)
    {
        field("Partitioner", validation.partitioner);
        field("Bloom Filter FP chance", validation.bloomFilterFPChance);
    }
    if (stats != null)
    {
        field("Minimum timestamp", toDateString(stats.minTimestamp, tsUnit), Long.toString(stats.minTimestamp));
        field("Maximum timestamp", toDateString(stats.maxTimestamp, tsUnit), Long.toString(stats.maxTimestamp));
        field("Duration", durationString(stats.maxTimestamp - stats.minTimestamp));
        field("SSTable min local deletion time", deletion(stats.minLocalDeletionTime), Long.toString(stats.minLocalDeletionTime));
        field("SSTable max local deletion time", deletion(stats.maxLocalDeletionTime), Long.toString(stats.maxLocalDeletionTime));
        field("Compressor", compressorClass != null ? compressorClass.getName() : "-");
        // Compression ratio is only meaningful when the table is compressed.
        if (compressorClass != null)
            field("Compression ratio", stats.compressionRatio);
        field("TTL min", stats.minTTL, toDurationString(stats.minTTL, TimeUnit.SECONDS));
        field("TTL max", stats.maxTTL, toDurationString(stats.maxTTL, TimeUnit.SECONDS));
        // Token range needs both the partitioner (validation) and key type (header).
        if (validation != null && header != null)
            printMinMaxToken(descriptor, FBUtilities.newPartitioner(descriptor), header.getKeyType(), stats);
        if (header != null)
        {
            ClusteringComparator comparator = new ClusteringComparator(header.getClusteringTypes());
            field("Covered clusterings", stats.coveredClustering.toString(comparator));
        }
        // gc is the user-supplied gc_grace_seconds; droppable ratio is evaluated
        // as of (now - gc) in seconds.
        field("Estimated droppable tombstones",
              stats.getEstimatedDroppableTombstoneRatio((int) (currentTimeMillis() / 1000) - this.gc));
        field("SSTable Level", stats.sstableLevel);
        field("Repaired at", stats.repairedAt, toDateString(stats.repairedAt, TimeUnit.MILLISECONDS));
        field("Originating host id", stats.originatingHostId);
        field("Pending repair", stats.pendingRepair);
        field("Replay positions covered", stats.commitLogIntervals);
        field("totalColumnsSet", stats.totalColumnsSet);
        field("totalRows", stats.totalRows);

        // Histograms: tombstone drop times, partition sizes and column counts.
        field("Estimated tombstone drop times", "");
        TermHistogram estDropped = new TermHistogram(stats.estimatedTombstoneDropTime,
                                                     "Drop Time",
                                                     offset -> String.format("%d %s",
                                                                             offset,
                                                                             Util.wrapQuiet(toDateString(offset, TimeUnit.SECONDS),
                                                                                            color)),
                                                     String::valueOf);
        estDropped.printHistogram(out, color, unicode);
        field("Partition Size", "");
        TermHistogram rowSize = new TermHistogram(stats.estimatedPartitionSize,
                                                  "Size (bytes)",
                                                  offset -> String.format("%d %s",
                                                                          offset,
                                                                          Util.wrapQuiet(toByteString(offset), color)),
                                                  String::valueOf);
        rowSize.printHistogram(out, color, unicode);
        field("Column Count", "");
        TermHistogram cellCount = new TermHistogram(stats.estimatedCellPerPartitionCount,
                                                    "Columns",
                                                    String::valueOf,
                                                    String::valueOf);
        cellCount.printHistogram(out, color, unicode);
        field("Local token space coverage", stats.tokenSpaceCoverage);
    }
    if (compaction != null)
    {
        field("Estimated cardinality", compaction.cardinalityEstimator.cardinality());
    }
    if (header != null)
    {
        EncodingStats encodingStats = header.getEncodingStats();
        AbstractType<?> keyType = header.getKeyType();
        List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
        // Render column-name -> type maps as human-readable strings.
        // Column names within a kind are unique, so toMap cannot collide.
        Map<ByteBuffer, AbstractType<?>> staticColumns = header.getStaticColumns();
        Map<String, String> statics = staticColumns.entrySet().stream()
                                                   .collect(Collectors.toMap(e -> UTF8Type.instance.getString(e.getKey()),
                                                                             e -> e.getValue().toString()));
        Map<ByteBuffer, AbstractType<?>> regularColumns = header.getRegularColumns();
        Map<String, String> regulars = regularColumns.entrySet().stream()
                                                     .collect(Collectors.toMap(e -> UTF8Type.instance.getString(e.getKey()),
                                                                               e -> e.getValue().toString()));
        field("EncodingStats minTTL", encodingStats.minTTL,
              toDurationString(encodingStats.minTTL, TimeUnit.SECONDS));
        // NOTE: EncodingStats deletion times are in seconds, unlike the
        // microsecond cell timestamps above.
        field("EncodingStats minLocalDeletionTime", toDateString(encodingStats.minLocalDeletionTime,
                                                                 TimeUnit.SECONDS), Long.toString(encodingStats.minLocalDeletionTime));
        field("EncodingStats minTimestamp", toDateString(encodingStats.minTimestamp, tsUnit),
              Long.toString(encodingStats.minTimestamp));
        field("KeyType", keyType.toString());
        field("ClusteringTypes", clusteringTypes.toString());
        field("StaticColumns", FBUtilities.toString(statics));
        field("RegularColumns", FBUtilities.toString(regulars));
        if (stats != null)
            field("IsTransient", stats.isTransient);
    }
}