extensions/spark/kyuubi-extension-spark-3-5/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderCommand.scala (3 lines):
  - line 44: // TODO: Support convert hive relation to datasource relation, can see
  - line 56: // TODO: Support datasource relation
  - line 57: // TODO: Support read and insert overwrite the same table for some table format

extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderCommandBase.scala (3 lines):
  - line 43: // TODO: Support convert hive relation to datasource relation, can see
  - line 55: // TODO: Support datasource relation
  - line 56: // TODO: Support read and insert overwrite the same table for some table format

extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderCommandBase.scala (3 lines):
  - line 43: // TODO: Support convert hive relation to datasource relation, can see
  - line 55: // TODO: Support datasource relation
  - line 56: // TODO: Support read and insert overwrite the same table for some table format

kyuubi-server/web-ui/src/views/management/session/index.vue (3 lines):
  - line 20:
  - line 28:
  - line 35:

python/pyhive/presto.py (2 lines):
  - line 76: # TODO cancel outstanding queries?
  - line 329: # TODO handle HTTP 503 (see the retry sketch after this list)

kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiDatabaseMetaData.java (2 lines):
  - line 138: // TODO: perhaps this could use a better implementation... for now even the Hive query result
  - line 325: // TODO: verify that this is correct

externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala (2 lines):
  - line 50: // TODO: avoid extra shuffle if `offset` > 0
  - line 115: // TODO: reuse the timeFormatters on greater scale if possible,

kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/BeeLine.java (2 lines):
  - line 101: * TODO:
  - line 2126: // TODO: Make script output prefixing configurable. Had to disable this since

kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/Commands.java (2 lines):
  - line 1116: // ### FIXME: doing the multi-line handling down here means
  - line 1644: // ### FIXME: this is broken for multi-line SQL

kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala (2 lines):
  - line 37: // TODO: Support replace `preparedStatement` for Trino-jdbc
  - line 298: // TODO: make complex data type more accurate

kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala (2 lines):
  - line 41: // TODO: maybe add a configuration is better
  - line 224: case appType if appType.startsWith("FLINK") => // TODO: check flink app access local paths

kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiQueryResultSet.java (2 lines):
  - line 223: // TODO need session handle
  - line 230: // TODO: should probably throw an exception here.

kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiArrowQueryResultSet.java (2 lines):
  - line 248: // TODO need session handle
  - line 255: // TODO: should probably throw an exception here.

kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiBatchService.scala (2 lines):
  - line 37: // TODO expose metrics, including pending/running/succeeded/failed batches
  - line 38: // TODO handle dangling batches, e.g. batch is picked and changed state to pending,

python/pyhive/sqlalchemy_hive.py (2 lines):
  - line 25: # TODO shouldn't use mysql type (see the type-mapping sketch after this list)
  - line 297: # TODO using TGetColumnsReq hangs after sending TFetchResultsReq.

kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiStatement.java (1 line):
  - line 808: // TODO need session handle

kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala (1 line):
  - line 355: // TODO: add limit for max batch job submission lifetime

kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLCommandHandler.scala (1 line):
  - line 103: // TODO parse SET command, save other variables at ChannelHandlerContext

python/pyhive/sqlalchemy_presto.py (1 line):
  - line 16: # TODO shouldn't use mysql type

extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWriting.scala (1 line):
  - line 55: // TODO: actually, we can force zorder even if existed some shuffle

extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala (1 line):
  - line 46: // TODO: Ideally, We can call `CoarseGrainedSchedulerBackend.requestTotalExecutors` eagerly

externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkSQLOperationManager.scala (1 line):
  - line 91: // TODO: respect the config of the operation ExecuteStatement, if it was set.

extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala (1 line):
  - line 85: // TODO: move this to query stage optimizer when updating Spark to 3.5.x

externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/schema/SchemaHelper.scala (1 line):
  - line 109: // TODO add more type support

kyuubi-server/src/main/scala/org/apache/kyuubi/server/BackendServiceMetric.scala (1 line):
  - line 189: // TODO: the statistics are wrong when we enabled the arrow.

externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala (1 line):
  - line 133: // TODO: refactor and reuse the code from RDD's take()

kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClient.scala (1 line):
  - line 200: // TODO: use last one because to avoid touching some maybe-crashed engines

kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLDataPackets.scala (1 line):
  - line 94: // TODO check all possible data types returned from backend service

extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala (1 line):
  - line 82: // TODO: move this to query stage optimizer when updating Spark to 3.5.x

kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClient.scala (1 line):
  - line 179: // TODO: use last one because to avoid touching some maybe-crashed engines

kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiCommands.java (1 line):
  - line 251: // ### FIXME: doing the multi-line handling down here means

extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/rule/Authorization.scala (1 line):
  - line 56: // TODO: Add this line Support for spark3.1, we can remove this

extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/HiveBatchWrite.scala (1 line):
  - line 121: tmpLocation.get.toString, // TODO: URI

kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala (1 line):
  - line 264: // TODO: Removed this after FLINK-35525 (1.20.0), delegation tokens will be passed

externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/schema/RowSet.scala (1 line):
  - line 107: * TODO: support Flink's new data type system

kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/KyuubiParserBase.scala (1 line):
  - line 66: // The following 2 lines are exactly what MySQL does TODO: why do we do this?

python/pyhive/sqlalchemy_trino.py (1 line):
  - line 15: # TODO shouldn't use mysql type

python/pyhive/trino.py (1 line):
  - line 106: # TODO handle HTTP 503

kyuubi-server/src/main/scala/org/apache/kyuubi/engine/JpsApplicationOperation.scala (1 line):
  - line 105: // TODO check if the process is zombie

externals/kyuubi-spark-sql-engine/src/main/scala-2.12/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala (1 line):
  - line 96: // TODO: handle SPARK-47475 since Spark 4.0.0 in the future

externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkTBinaryFrontendService.scala (1 line):
  - line 105: // TODO Support Spark Web UI Enabled SSL

extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala (1 line):
  - line 46: // TODO: Ideally, We can call `CoarseGrainedSchedulerBackend.requestTotalExecutors` eagerly

extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegeObject.scala (1 line):
  - line 88: ) // TODO: Support catalog for function

kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosAuthenticationHandler.scala (1 line):
  - line 69: // TODO: support to config kerberos.name.rules and kerberos.rule.mechanism

externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala (1 line):
  - line 250: // TODO: largeVarType support, see SPARK-39979.

python/pyhive/hive.py (1 line):
  - line 117: # TODO verify against parser

kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/DatabaseConnection.java (1 line):
  - line 173: // TODO: Setting autocommit should not generate an exception as long as it is set to false

kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala (1 line):
  - line 197: // TODO add deploy mode to check whether is supported

kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiSQLException.java (1 line):
  - line 77: // TODO: set correct vendorCode field

extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWriting.scala (1 line):
  - line 54: // TODO: actually, we can force zorder even if existed some shuffle

kyuubi-server/src/main/scala/org/apache/kyuubi/engine/trino/TrinoProcessBuilder.scala (1 line):
  - line 96: // TODO: How shall we deal with proxyUser,

extensions/spark/kyuubi-extension-spark-3-5/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWriting.scala (1 line):
  - line 62: // TODO: actually, we can force zorder even if existed some shuffle

kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiConnection.java (1 line):
  - line 1403: // TODO: throw an exception?

extensions/spark/kyuubi-extension-spark-3-5/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala (1 line):
  - line 46: // TODO: Ideally, We can call `CoarseGrainedSchedulerBackend.requestTotalExecutors` eagerly

externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/HiveSQLEngine.scala (1 line):
  - line 78: // TODO: hive 2.3.x has scala 2.11 deps.

externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/util/SparkCatalogUtils.scala (1 line):
  - line 215: // TODO: restore view type for session catalog

kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala (1 line):
  - line 131: case _: IOException => // TODO: better do nothing?

extensions/spark/kyuubi-spark-lineage/src/main/scala/org/apache/kyuubi/plugin/lineage/dispatcher/atlas/AtlasEntityHelper.scala (1 line):
  - line 59: // TODO add entity type instead of parsing from string

externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/connection/ConnectionProvider.scala (1 line):
  - line 66: // TODO support security connection

kyuubi-server/web-ui/src/views/management/operation/index.vue (1 line):
  - line 112: // TODO add delete success or failed logic after api support

externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SQLOperationListener.scala (1 line):
  - line 110: case _ => "failed" // TODO: Handle JobFailed(exception: Exception)

kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/auth/SpnegoAuthHeaderGenerator.java (1 line):
  - line 76: // TODO do kerberos authentication using JDK class directly

kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/hs2connection/KyuubiConfFileParser.java (1 line):
  - line 83: // TODO: Kyuubi has different logic to handle SSL and QOP properties with HiveServer2.

kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkBatchProcessBuilder.scala (1 line):
  - line 37: // TODO respect doAsEnabled

kyuubi-zookeeper/src/main/scala/org/apache/kyuubi/zookeeper/EmbeddedZookeeper.scala (1 line):
  - line 37: // TODO: Is it right in prod?

kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/KyuubiOpenApiResource.scala (1 line):
  - line 88: // TODO: to improve when https is enabled.

python/setup.cfg (1 line):
  - line 13: # TODO For old sqlalchemy

externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperationManager.scala (1 line):
  - line 129: // TODO: Supports the GetFunctions operation when Trino supports the query of the functions.

extensions/spark/kyuubi-extension-spark-3-5/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala (1 line):
  - line 85: // TODO: move this to query stage optimizer when updating Spark to 3.5.x
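Two of the recurring items above are concrete enough to sketch. Both python/pyhive/presto.py and python/pyhive/trino.py carry "# TODO handle HTTP 503", i.e. a temporarily overloaded coordinator currently surfaces as a hard failure. Below is a minimal sketch of treating 503 as a transient condition with capped exponential backoff; it assumes a plain requests-based client, and the function name submit_statement and the MAX_RETRIES limit are illustrative, not part of the PyHive API.

```python
# Hypothetical retry helper; not PyHive code.
import time

import requests

MAX_RETRIES = 5


def submit_statement(url, sql, headers=None):
    """POST a statement, retrying with exponential backoff on HTTP 503."""
    for attempt in range(MAX_RETRIES):
        response = requests.post(url, data=sql.encode("utf-8"), headers=headers)
        if response.status_code != 503:
            # Any non-503 outcome is handled normally: raise on other errors,
            # otherwise hand back the decoded response body.
            response.raise_for_status()
            return response.json()
        # 503 means the server is overloaded; wait 1s, 2s, 4s, ... capped at 30s.
        time.sleep(min(2 ** attempt, 30))
    raise RuntimeError("still receiving HTTP 503 after %d attempts" % MAX_RETRIES)
```

Bounding both the per-attempt wait and the attempt count keeps a briefly overloaded coordinator from turning every statement into an immediate error without letting the client hang indefinitely.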
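The second recurring item, "# TODO shouldn't use mysql type" in sqlalchemy_hive.py, sqlalchemy_presto.py, and sqlalchemy_trino.py, points at the dialects borrowing MySQL-specific column types. A possible direction, sketched under the assumption that the goal is SQLAlchemy's generic, dialect-neutral type objects, is a plain name-to-type lookup; the mapping and helper below are hypothetical and not the dialects' actual tables.

```python
# Hypothetical mapping from engine type names to generic SQLAlchemy types.
from sqlalchemy import types

_GENERIC_TYPE_MAP = {
    'BOOLEAN': types.Boolean,
    'TINYINT': types.SmallInteger,
    'SMALLINT': types.SmallInteger,
    'INT': types.Integer,
    'BIGINT': types.BigInteger,
    'FLOAT': types.Float,
    'DOUBLE': types.Float,
    'DECIMAL': types.DECIMAL,
    'STRING': types.String,
    'VARCHAR': types.String,
    'TIMESTAMP': types.TIMESTAMP,
    'DATE': types.Date,
    'BINARY': types.LargeBinary,
}


def resolve_type(type_name):
    """Return a generic SQLAlchemy type class for an engine type name."""
    # Fall back to NullType for anything the map does not cover.
    return _GENERIC_TYPE_MAP.get(type_name.upper(), types.NullType)
```

Using generic types keeps reflected table metadata portable across SQLAlchemy dialects instead of tying it to MySQL rendering rules.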