in connector/src/main/scala/com/datastax/spark/connector/rdd/reader/FunctionBasedRowReader.scala [201:229]
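
// read() override of the 9-argument reader: each of the first nine columns
// is fetched with its registered codec via GettableData.get and passed
// through the matching implicit TypeConverter (a0c..a8c) before the
// user-supplied function f assembles the result of type R.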
  override def read(row: Row, rowMetaData: CassandraRowMetadata) =
    f(
      a0c.convert(GettableData.get(row, 0, rowMetaData.codecs(0))),
      a1c.convert(GettableData.get(row, 1, rowMetaData.codecs(1))),
      a2c.convert(GettableData.get(row, 2, rowMetaData.codecs(2))),
      a3c.convert(GettableData.get(row, 3, rowMetaData.codecs(3))),
      a4c.convert(GettableData.get(row, 4, rowMetaData.codecs(4))),
      a5c.convert(GettableData.get(row, 5, rowMetaData.codecs(5))),
      a6c.convert(GettableData.get(row, 6, rowMetaData.codecs(6))),
      a7c.convert(GettableData.get(row, 7, rowMetaData.codecs(7))),
      a8c.convert(GettableData.get(row, 8, rowMetaData.codecs(8)))
    )
}
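
// A RowReader that builds an R from the first ten columns of a row: each
// column value is fetched with its codec and converted to the expected
// Scala type by the corresponding implicit TypeConverter before the
// 10-argument function f is applied. The ClassTag is marked @transient,
// so it is not serialized along with the reader.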
class FunctionBasedRowReader10[R, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9]
  (f: (A0, A1, A2, A3, A4, A5, A6, A7, A8, A9) => R)(
  implicit
  a0c: TypeConverter[A0],
  a1c: TypeConverter[A1],
  a2c: TypeConverter[A2],
  a3c: TypeConverter[A3],
  a4c: TypeConverter[A4],
  a5c: TypeConverter[A5],
  a6c: TypeConverter[A6],
  a7c: TypeConverter[A7],
  a8c: TypeConverter[A8],
  a9c: TypeConverter[A9],
  @transient override val ct: ClassTag[R])
  extends FunctionBasedRowReader[R] {
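  // NOTE: the quoted excerpt ends at line 229, so the body of
  // FunctionBasedRowReader10 is not part of it. By analogy with the
  // 9-argument reader above, its read override would be expected to convert
  // columns 0..9 with the converters a0c..a9c and apply f; a sketch:
  override def read(row: Row, rowMetaData: CassandraRowMetadata) =
    f(
      a0c.convert(GettableData.get(row, 0, rowMetaData.codecs(0))),
      a1c.convert(GettableData.get(row, 1, rowMetaData.codecs(1))),
      a2c.convert(GettableData.get(row, 2, rowMetaData.codecs(2))),
      a3c.convert(GettableData.get(row, 3, rowMetaData.codecs(3))),
      a4c.convert(GettableData.get(row, 4, rowMetaData.codecs(4))),
      a5c.convert(GettableData.get(row, 5, rowMetaData.codecs(5))),
      a6c.convert(GettableData.get(row, 6, rowMetaData.codecs(6))),
      a7c.convert(GettableData.get(row, 7, rowMetaData.codecs(7))),
      a8c.convert(GettableData.get(row, 8, rowMetaData.codecs(8))),
      a9c.convert(GettableData.get(row, 9, rowMetaData.codecs(9)))
    )
}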