in connector/src/main/scala/com/microsoft/kusto/spark/datasink/RowCSVWriterUtils.scala [62:102]
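// Writes one row field to the CSV writer, dispatching on the field's Spark DataType.
// The `nested` flag is threaded through to writeJsonField, presumably so it can treat
// a top-level CSV field differently from JSON embedded inside an enclosing JSON value.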
private def writeField(
    row: SpecializedGetters,
    fieldIndexInRow: Int,
    dataType: DataType,
    timeZone: ZoneId,
    writer: Writer,
    nested: Boolean): Unit = {
  dataType match {
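    // Simple scalars: strings and binary go through dedicated helpers; dates and
    // timestamps are rendered as ISO-8601 strings (timestamps converted using the
    // supplied timeZone).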
    case StringType => writeStringFromUTF8(row.get(fieldIndexInRow, StringType), writer)
    case BinaryType => writeStringFromBinary(row.getBinary(fieldIndexInRow), writer)
    case DateType =>
      writer.writeStringField(DateTimeUtils.toJavaDate(row.getInt(fieldIndexInRow)).toString)
    case TimestampType =>
      writer.writeStringField(
        getLocalDateTimeFromTimestampWithZone(row.getLong(fieldIndexInRow), timeZone).toString)
    case BooleanType => writer.write(row.getBoolean(fieldIndexInRow).toString)
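    // Complex types are serialized to JSON so each value lands in a single CSV cell
    // (e.g. for ingestion into a Kusto `dynamic` column).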
    case structType: StructType =>
      writeJsonField(
        convertStructToJson(
          row.getStruct(fieldIndexInRow, structType.length),
          structType,
          timeZone),
        writer,
        nested)
    case arrType: ArrayType =>
      writeJsonField(
        convertArrayToJson(row.getArray(fieldIndexInRow), arrType.elementType, timeZone),
        writer,
        nested)
    case mapType: MapType =>
      writeJsonField(
        convertMapToJson(row.getMap(fieldIndexInRow), mapType, timeZone),
        writer,
        nested)
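    // Numeric primitives fall back to the boxed value's toString; decimals get
    // precision/scale-aware handling in writeDecimalField.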
    case ByteType | ShortType | IntegerType | LongType | FloatType | DoubleType =>
      writer.write(row.get(fieldIndexInRow, dataType).toString)
    case decimalType: DecimalType =>
      writeDecimalField(row, fieldIndexInRow, decimalType.precision, decimalType.scale, writer)
    case _ => writer.writeStringField(row.get(fieldIndexInRow, dataType).toString)
  }
}
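
// A minimal, self-contained sketch (not connector code) of the same dispatch-on-DataType
// technique against a Catalyst InternalRow. Everything below (WriteFieldSketch, renderField,
// the demo values) is hypothetical and only illustrates the pattern; the JSON, binary, and
// decimal handling of writeField above is intentionally omitted.

import java.time.{Instant, ZoneId}

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String

object WriteFieldSketch {
  // Renders one field of an InternalRow as a plain string, branching on its DataType,
  // mirroring the shape of RowCSVWriterUtils.writeField.
  def renderField(row: InternalRow, i: Int, dt: DataType, zone: ZoneId): String =
    if (row.isNullAt(i)) ""
    else
      dt match {
        case StringType => row.getUTF8String(i).toString
        case DateType => DateTimeUtils.toJavaDate(row.getInt(i)).toString
        case TimestampType =>
          // Catalyst stores timestamps as microseconds since the epoch.
          Instant.ofEpochSecond(0L, row.getLong(i) * 1000L).atZone(zone).toLocalDateTime.toString
        case BooleanType => row.getBoolean(i).toString
        case _ => String.valueOf(row.get(i, dt))
      }

  def main(args: Array[String]): Unit = {
    // A row of (string, date-as-days-since-epoch, boolean), as Catalyst represents them.
    val row = InternalRow(UTF8String.fromString("hello"), 19000, true)
    val schema = Seq(StringType, DateType, BooleanType)
    val line = schema.zipWithIndex
      .map { case (dt, i) => renderField(row, i, dt, ZoneId.of("UTC")) }
      .mkString(",")
    println(line) // e.g. hello,2022-01-08,true
  }
}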