spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala (6 lines):
- line 462: // TODO: In future we can have Spark support columns sorted in descending order
- line 643: // TODO: figure out how to drop multiple partitions in one call
- line 910: new Path(loadPath), // TODO: Use URI
- line 920: loadPath: String, // TODO URI
- line 1233: // TODO: stats should include all the other two fields (`numFiles` and `numPartitions`).
- line 1246: // TODO: still fill the rowCount even if sizeInBytes is empty. Might break anything?
spark-connector/common/src/main/java/org/apache/spark/sql/odps/table/tunnel/write/TunnelArrowBatchWriter.java (5 lines):
- line 80: // TODO: now hard code arrow timestamp nanos
- line 90: // TODO: use odps sdk schema utils
- line 95: // TODO: lazy create writer
- line 150: // TODO: support schema
- line 163: // TODO: compression
spark-connector/common/src/main/java/org/apache/spark/sql/odps/table/tunnel/write/TunnelTableBatchWriteSession.java (5 lines):
- line 64: // TODO: support schema
- line 78: // TODO: support schema
- line 92: // TODO: support schema
- line 140: // TODO: check session
- line 186: // TODO: support schema
flink-connector-odps/src/main/java/org/apache/flink/odps/catalog/OdpsCatalog.java (5 lines):
- line 203: // TODO: table properties
- line 208: // TODO: support persist properties
- line 226: // TODO: view params
- line 254: // TODO: CatalogView
- line 600: // TODO: more info
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala (4 lines):
- line 132: * UnionObjectInspector: (tag: Int, object data) (TODO: not supported by SparkSQL yet)
- line 276: // TODO we don't support the HiveVarcharObjectInspector yet.
- line 847: // TODO decimal precision?
- line 1123: // TODO precise, scale?
spark-connector/common/src/main/java/org/apache/spark/sql/odps/table/tunnel/read/TunnelTableBatchReadSession.java (3 lines):
- line 94: // TODO: support schema
- line 105: // TODO: public odps sdk need table.getPartitionSpecs();
- line 252: // TODO: support schema
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala (3 lines):
- line 142: // TODO: Load defaults in cluster mode
- line 152: // TODO statsLevel expects `none`, `size`, `rowCount` and `colStats`,
- line 954: // TODO: support aliort
spark-connector/common/src/main/java/org/apache/spark/sql/odps/table/tunnel/write/TunnelRecordWriter.java (2 lines):
- line 121: // TODO: support schema
- line 134: // TODO: compression
odps-sqoop/pom-old.xml (2 lines):
- line 95:
- line 224: FIXME we must depends on org.apache !
flink-connector-odps/src/main/java/org/apache/flink/odps/table/OdpsDynamicTableSource.java (2 lines):
- line 91: // TODO: Singleton
- line 158: // TODO: cache partitions
odps-data-carrier/meta-carrier/src/main/java/com/aliyun/odps/datacarrier/metacarrier/HiveMetaCarrier.java (2 lines):
- line 150: // TODO: what if there are more than 32767 partitions
- line 151: // TODO: support parquet
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/execution/OdpsTableScanExec.scala (2 lines):
- line 180: // TODO: support three tier model
- line 393: // TODO: bucket
presto-connector/src/main/java/com/facebook/presto/maxcompute/MaxComputeMetadata.java (2 lines):
- line 94: // TODO: use cache
- line 144: // TODO: deal with partition
presto-connector/src/main/java/com/facebook/presto/maxcompute/utils/TypeConvertUtils.java (2 lines):
- line 75: // TODO: support complex type (array, map, struct)
- line 134: // TODO: support map and struct
odps-sqoop/src/java/org/apache/sqoop/lib/JdbcWritableBridge.java (2 lines):
- line 247: // TODO: support this.
- line 253: // TODO: support this.
spark-connector/datasource/src/main/scala/org/apache/spark/sql/execution/datasources/v2/odps/OdpsMetaClient.scala (2 lines):
- line 48: // TODO partStatsLevel expects `none`, `size`, `rowCount` and `colStats`,
- line 222: // TODO: Load defaults in cluster mode
trino-connector/src/main/java/io/trino/plugin/maxcompute/MaxComputeMetadata.java (2 lines):
- line 92: // TODO: use cache
- line 145: // // TODO: deal with partition
trino-connector/src/main/java/io/trino/plugin/maxcompute/utils/TypeConvertUtils.java (2 lines):
- line 75: // TODO: support complex type (array, map, struct)
- line 134: // TODO: support map and struct
odps-sqoop/src/java/org/apache/sqoop/mapreduce/MySQLExportMapper.java (2 lines):
- line 321: // TODO: Support additional encodings.
- line 361: // TODO: Support user-configurable encodings.
presto-connector/src/main/java/com/facebook/presto/maxcompute/utils/ArrowToPageConverter.java (1 line):
- line 100: // TODO: support map and struct
flink-connector-odps/src/main/java/org/apache/flink/odps/output/writer/file/ColumnarBlockWriter.java (1 line):
- line 48: // TODO: OdpsWriteOptions
odps-data-carrier/meta-processor/src/main/java/com/aliyun/odps/datacarrier/metaprocessor/HiveTypeTransformer.java (1 line):
- line 41: //TODO: support odps1.0
odps-sqoop/src/java/org/apache/sqoop/SqoopOptions.java (1 line):
- line 2103: * TODO: Expose this setter via the command-line arguments for the codegen
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/execution/OdpsSqlParser.scala (1 line):
- line 36: // TODO: support truncate table
flink-connector-odps/src/main/java/org/apache/flink/odps/input/reader/CupidBatchIterator.java (1 line):
- line 103: // TODO: record may be null
odps-sqoop/src/java/org/apache/sqoop/manager/oracle/OraOopDBRecordReader.java (1 line):
- line 138: // TODO Cast this using desiredSplitClass, so we only need 1 line of code
odps-sqoop/src/java/org/apache/sqoop/manager/NetezzaManager.java (1 line):
- line 151: // TODO Force batchmode?
spark-connector/common/src/main/java/org/apache/spark/sql/odps/table/tunnel/read/TunnelSplitRecordReader.java (1 line):
- line 129: // TODO: support schema
odps-sqoop/src/java/org/apache/sqoop/tool/JobTool.java (1 line):
- line 251: // TODO: This does not show entries in the Configuration
trino-connector/src/main/java/io/trino/plugin/maxcompute/MaxComputeColumnHandle.java (1 line):
- line 64: // TODO: support comment, is null and other metadata
odps-sqoop/src/java/org/apache/sqoop/mapreduce/db/DBRecordReader.java (1 line):
- line 82: // TODO (aaron): Refactor constructor to take fewer arguments
odps-data-carrier/meta-processor/src/main/java/com/aliyun/odps/datacarrier/metaprocessor/MetaProcessor.java (1 line):
- line 134: // TODO: check table name conflicts
odps-sqoop/src/java/org/apache/sqoop/avro/AvroUtil.java (1 line):
- line 158: // TODO: Should convert BytesWritable to BlobRef properly. (SQOOP-991)
cupid-table-api/src/main/java/com/aliyun/odps/cupid/table/v1/reader/ReadCapabilities.java (1 line):
- line 48: // TODO: support split mode and data columns
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala (1 line):
- line 254: loadPath: String, // TODO URI
flink-connector-odps/src/main/java/org/apache/flink/odps/output/writer/file/StaticOdpsPartitionWrite.java (1 line):
- line 80: // TODO: use write options
odps-sqoop/src/java/org/apache/sqoop/mapreduce/db/DBInputFormat.java (1 line):
- line 108: // TODO Add a layer to enable SQL "sharding" and support locality
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala (1 line):
- line 41: * TODO: merge this with PruneFileSourcePartitions after we completely make hive as a data source.
odps-sqoop/src/java/org/apache/sqoop/mapreduce/odps/HdfsOdpsImportJob.java (1 line):
- line 183: // TODO validateImport
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateOdpsTableAsSelectCommand.scala (1 line):
- line 64: // TODO ideally, we should get the output data ready first and then
flink-connector-odps/src/main/java/org/apache/flink/odps/output/writer/stream/GroupedOdpsPartitionStreamWrite.java (1 line):
- line 70: // TODO: cluster mode support grouped writer
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala (1 line):
- line 96: // TODO: Finish input output types.
spark-connector/common/src/main/scala/org/apache/spark/sql/odps/OdpsUtils.scala (1 line):
- line 153: // TODO: use odps-sdk setDateAsLocalDate
spark-connector/common/src/main/java/org/apache/spark/sql/odps/table/tunnel/read/TunnelArrowSplitReader.java (1 line):
- line 161: // TODO: support schema
spark-connector/common/src/main/java/org/apache/spark/sql/odps/vectorized/OdpsArrowColumnVector.java (1 line):
- line 101: * TODO: rebaseJulianToGregorianDays
hive_data_transfer_udtf/src/main/java/odps/data/dump/MaxComputeDataTransferUDTFMultiPart.java (1 line):
- line 33: // TODO: refactor
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala (1 line):
- line 153: // TODO: Remove copy logic.
odps-sqoop/src/java/org/apache/sqoop/lib/RecordParser.java (1 line):
- line 103: // TODO: Support field name
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala (1 line):
- line 172: // TODO: bucket filters
spark-connector/common/src/main/java/org/apache/spark/sql/odps/table/utils/ArrowSchemaUtils.java (1 line):
- line 140: // TODO: decimal
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala (1 line):
- line 443: // TODO: Support for loading the jars from an already downloaded location.
flink-connector-odps/src/main/java/org/apache/flink/odps/output/writer/OdpsWriteFactory.java (1 line):
- line 41: // TODO: support for cluster mode
flink-connector-odps/src/main/java/org/apache/flink/odps/table/OdpsDynamicTableSink.java (1 line):
- line 221: //TODO: support file sink function for cluster mode
spark-connector/datasource/src/main/scala/org/apache/spark/sql/execution/datasources/v2/odps/OdpsBatchWrite.scala (1 line):
- line 59: // TODO: schema
spark-connector/common/src/main/scala/org/apache/spark/sql/odps/execution/exchange/OdpsShuffleExchangeExec.scala (1 line):
- line 246: // TODO: Handle BroadcastPartitioning.
odps-sqoop/src/java/org/apache/sqoop/manager/OracleManager.java (1 line):
- line 1057: // TODO select the appropriate column instead of the first column based
presto-connector/src/main/java/com/facebook/presto/maxcompute/MaxComputeColumnHandle.java (1 line):
- line 64: // TODO: support comment, is null and other metadata
odps-sqoop/src/java/org/apache/sqoop/mapreduce/db/DataDrivenDBInputFormat.java (1 line):
- line 138: // TODO: Support BINARY, VARBINARY, LONGVARBINARY, DISTINCT, CLOB,
trino-connector/src/main/java/io/trino/plugin/maxcompute/utils/MaxComputeMetaCache.java (1 line):
- line 36: // TODO: cache options
flink-connector-odps/src/main/java/org/apache/flink/odps/input/reader/RecordIterator.java (1 line):
- line 99: // TODO:record may be null in cluster mode
spark-connector/common/src/main/scala/org/apache/spark/sql/odps/OdpsPartitionReaderFactory.scala (1 line):
- line 198: // TODO: bearer token refresh
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala (1 line):
- line 1506: // TODO: Now, always set environmentContext to null. In the future, we should avoid setting
odps-data-carrier/data-transfer-hive-udtf/src/main/java/com/aliyun/odps/datacarrier/transfer/converter/HiveCharObjectConverter.java (1 line):
- line 30: // TODO: support hive.compatible (return a Char object)
spark-connector/common/src/main/scala/org/apache/spark/sql/odps/OdpsTableDataWriter.scala (1 line):
- line 404: * TODO: DynamicRecordWriter, now mode not support write dynamic partition
spark-connector/common/src/main/java/org/apache/spark/sql/odps/OdpsClient.java (1 line):
- line 129: // TODO: modify settings?
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoOdpsTable.scala (1 line):
- line 92: // TODO: null partition
odps-sqoop/src/java/com/cloudera/sqoop/mapreduce/db/DBRecordReader.java (1 line):
- line 42: // TODO (aaron): Refactor constructor to take fewer arguments
odps-sqoop/src/java/org/apache/sqoop/tool/BaseSqoopTool.java (1 line):
- line 1690: // TODO: validate more options
odps-sqoop/src/java/org/apache/sqoop/mapreduce/JobBase.java (1 line):
- line 277: //TODO: 'splitByCol' is import-job specific; lift it out of this API.
spark-connector/common/src/main/scala/org/apache/spark/sql/odps/ArrowUtils.scala (1 line):
- line 85: /** TODO: for odps extension legacy decimal */
odps-data-carrier/meta-processor/src/main/java/com/aliyun/odps/datacarrier/metaprocessor/report/ReportBuilder.java (1 line):
- line 33: // TODO: order the databases & tables
flink-connector-odps/src/main/java/org/apache/flink/odps/util/OdpsTypeUtil.java (1 line):
- line 236: // TODO: in 1.9 always == 10
spark-connector/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala (1 line):
- line 48: // TODO: Load defaults in cluster mode
presto-connector/src/main/java/com/facebook/presto/maxcompute/utils/MaxComputeMetaCache.java (1 line):
- line 36: // TODO: cache options
flink-connector-odps/src/main/java/org/apache/flink/odps/util/OdpsTypeConverter.java (1 line):
- line 596: * TODO take TimeZone into consideration.
odps-sqoop/src/java/org/apache/sqoop/metastore/hsqldb/HsqldbJobStorage.java (1 line):
- line 518: // TODO: Sanity-check the value of rootTableName to ensure it is
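The listing above reads like the output of a plain-text scan for TODO/FIXME markers, grouped per file and sorted by hit count. As a minimal sketch of how such an inventory could be regenerated, assuming a simple marker scan (the script below is hypothetical and not part of the repository; extensions and output format are inferred from the listing itself):

    #!/usr/bin/env python3
    """Hypothetical helper: rebuild a TODO/FIXME inventory in the format used above."""
    import os
    import re
    import sys

    MARKER = re.compile(r"\b(TODO|FIXME)\b")
    # Extensions inferred from the listing (Java, Scala, and one pom XML file).
    SOURCE_EXTS = {".java", ".scala", ".xml"}

    def scan(root):
        """Map each source file under `root` to its (line number, text) marker hits."""
        hits = {}
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                if os.path.splitext(name)[1] not in SOURCE_EXTS:
                    continue
                path = os.path.join(dirpath, name)
                with open(path, encoding="utf-8", errors="replace") as f:
                    found = [(i, line.strip()) for i, line in enumerate(f, 1)
                             if MARKER.search(line)]
                if found:
                    hits[os.path.relpath(path, root)] = found
        return hits

    if __name__ == "__main__":
        root = sys.argv[1] if len(sys.argv) > 1 else "."
        # Files with the most markers first, matching the ordering of the report.
        for path, found in sorted(scan(root).items(), key=lambda kv: -len(kv[1])):
            print(f"{path} ({len(found)} lines):")
            for lineno, text in found:
                print(f"- line {lineno}: {text}")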