in cassandra-analytics-core/src/main/java/org/apache/cassandra/spark/sparksql/AbstractSparkRowIterator.java [105:153]
public boolean next() throws IOException
{
    // We are finished if we are not already mid-row (cell != null can happen if the previous row was incomplete)
    // and SparkCellIterator has no next value
    if (cell == null && !it.hasNextThrows())
    {
        return false;
    }
    // Pivot the cells, normalizing them into a single SparkSQL 'CQL' row
    do
    {
        if (cell == null)
        {
            // Read the next cell
            cell = it.next();
        }
        if (builder.isFirstCell())
        {
            // On the first iteration, copy all partition keys, clustering keys and static columns
            assert cell.isNewRow;
            builder.copyKeys(cell);
        }
        else if (cell.isNewRow)
        {
            // The current row is incomplete but a new row has already started;
            // break out to return the current (incomplete) row and handle the new row on the next iteration
            break;
        }
        builder.onCell(cell);
        if (!noValueColumns && !cell.isTombstone())
        {
            // Copy the value across if the schema has value columns and the cell is not a row deletion (tombstone)
            builder.copyValue(cell);
        }
        cell = null;
        // Keep reading cells until we have read the entire row
    } while (builder.hasMoreCells() && it.hasNextThrows());
    // Build the row and reset the builder for the next row
    row = builder.build();
    builder.reset();
    stats.nextRow();
    return true;
}
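
For context, next() pivots a flat stream of per-column cells from SparkCellIterator into one SparkSQL row per primary key. The sketch below illustrates that pivoting pattern in isolation; the Cell record and the map-based "builder" are hypothetical simplifications for illustration only, not the real SparkCellIterator or RowBuilder types.

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical, simplified sketch of the cell-to-row pivot; not the real Cassandra Analytics types
public final class CellPivotSketch
{
    // Stand-in for the real cell type: one value per column, flagged when a new row begins
    record Cell(String rowKey, String column, Object value, boolean isNewRow) {}

    public static void main(String[] args)
    {
        // Cells arrive grouped by row; the first cell of each row is flagged isNewRow
        Iterator<Cell> cells = List.of(new Cell("k1", "a", 1, true),
                                       new Cell("k1", "b", 2, false),
                                       new Cell("k2", "a", 3, true),   // "k1" ends here, "k2" begins
                                       new Cell("k2", "b", 4, false)).iterator();

        Cell pending = null;  // carries the first cell of the next row across iterations, like the cell field above
        while (pending != null || cells.hasNext())
        {
            Map<String, Object> row = new LinkedHashMap<>();  // stand-in for the RowBuilder
            Cell cell = pending != null ? pending : cells.next();
            pending = null;
            row.put("key", cell.rowKey());
            row.put(cell.column(), cell.value());

            while (cells.hasNext())
            {
                Cell next = cells.next();
                if (next.isNewRow())
                {
                    // A new row started before the current one finished: hold the cell back and
                    // emit the current row first (mirrors the break in next() above)
                    pending = next;
                    break;
                }
                row.put(next.column(), next.value());
            }
            System.out.println(row);  // stand-in for row = builder.build()
        }
    }
}

The key point the sketch keeps is the held-back cell: when a new row begins before the current one is complete, the partially built row is emitted first and the held-back cell seeds the next iteration, exactly as the cell field and the break in next() do above.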