in cassandra-four-zero/src/main/java/org/apache/cassandra/spark/reader/CompressionMetadata.java [48:92]
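/**
 * Deserializes SSTable compression metadata (the CompressionInfo.db component) from the given stream.
 *
 * @param inStream            stream positioned at the start of the serialized compression metadata
 * @param hasCompressedLength whether the serialized form also carries the min-compress-ratio field
 * @return the deserialized compression metadata
 * @throws IOException if the stream cannot be read or ends prematurely
 */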
static CompressionMetadata fromInputStream(InputStream inStream, boolean hasCompressedLength) throws IOException
{
    long dataLength;
    BigLongArray chunkOffsets;
    DataInputStream inData = new DataInputStream(inStream);
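
    // Header: the compressor class name, followed by its options as key/value pairs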
    String compressorName = inData.readUTF();
    int optionCount = inData.readInt();
    Map<String, String> options = new HashMap<>(optionCount);
    for (int option = 0; option < optionCount; ++option)
    {
        options.put(inData.readUTF(), inData.readUTF());
    }
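
    // Chunk size, then the min-compress-ratio field, which is only serialized when
    // hasCompressedLength is set; Integer.MAX_VALUE is the default when it is absent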
    int chunkLength = inData.readInt();
    int minCompressRatio = Integer.MAX_VALUE;
    if (hasCompressedLength)
    {
        minCompressRatio = inData.readInt();
    }
    CompressionParams params = new CompressionParams(compressorName, chunkLength, minCompressRatio, options);
    params.setCrcCheckChance(AbstractCompressionMetadata.CRC_CHECK_CHANCE);
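
    // Total uncompressed data length, then the offset of every compressed chunk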
    dataLength = inData.readLong();
    int chunkCount = inData.readInt();
    chunkOffsets = new BigLongArray(chunkCount);
    for (int chunk = 0; chunk < chunkCount; chunk++)
    {
        try
        {
            chunkOffsets.set(chunk, inData.readLong());
        }
        catch (EOFException exception)
        {
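            // Hitting EOF mid-index means the chunk index is truncated; rethrow with a clearer message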
            throw new EOFException(String.format("Corrupted compression index: read %d but expected %d chunks.",
                                                 chunk, chunkCount));
        }
    }
    return new CompressionMetadata(dataLength, chunkOffsets, params);
}