flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/SelectivityEstimator.scala (9 lines): - line 237: // TODO supports more operators - line 274: // TODO supports more operators - line 431: // TODO not take include min/max into consideration now - line 589: // TODO: It is difficult to support binary comparisons for non-numeric type - line 668: // TODO not take includeMin into consideration now - line 680: // TODO not take includeMax into consideration now - line 693: // TODO not take includeMin into consideration now - line 704: // TODO not take includeMax into consideration now - line 738: // TODO: It is difficult to support binary comparisons for non-numeric type flink-table/flink-sql-jdbc-driver/src/main/java/org/apache/flink/table/jdbc/FlinkResultSet.java (9 lines): - line 104: // TODO check the kind of currentRow - line 442: // TODO should be supported after - line 515: // TODO support array data - line 526: // TODO get date with timezone - line 532: // TODO get date with timezone - line 538: // TODO get time with timezone - line 544: // TODO get time with timezone - line 550: // TODO get timestamp with timezone - line 556: // TODO get timestamp with timezone flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java (9 lines): - line 277: // TODO jvs 11-Dec-2008: make this local to performUnconditionalRewrites - line 684: // TODO: do real implicit collation here - line 753: // TODO: do real implicit collation here - line 1814: // TODO jvs 10-Oct-2005: get rid of this workaround once - line 1944: // TODO jvs 12-Feb-2005: proper type name formatting - line 2123: // TODO: eventually should assert(operandTypeInference != null) - line 2896: // FIXME jvs 9-Feb-2005: Correlation should - line 4577: // TODO: when SELECT appears as a value sub-query, should be using - line 6738: // TODO: Don't expand the list every time. 
Maybe keep an expanded flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java (7 lines): - line 1836: // TODO: remove requireNonNull when - line 2383: *
TODO At present, it is a relatively hacked way - line 2431: // TODO add query-block alias hint in SqlNode instead of here - line 2626: // TODO combine with buildCollation method after support NULLS_FIRST/NULLS_LAST - line 3788: // TODO: add validation rules to SqlValidator also - line 3886: // TODO: handle collation sequence - line 3887: // TODO: flag expressions as non-standard flink-runtime/src/main/java/org/apache/flink/runtime/state/OperatorStateRestoreOperation.java (6 lines): - line 102: // TODO when eager state registration is in place, we can try to get a - line 104: // TODO from the newly registered typeSerializer instead of simply failing - line 125: // TODO with eager state registration in place, check here for - line 146: // TODO when eager state registration is in place, we can try to get a - line 148: // TODO from the newly registered typeSerializer instead of simply failing - line 169: // TODO with eager state registration in place, check here for flink-runtime/src/main/java/org/apache/flink/runtime/state/FullSnapshotAsyncWriter.java (6 lines): - line 116: // TODO: this code assumes that writing a serializer is threadsafe, we - line 155: // TODO this could be aware of keyGroupPrefixBytes and write only one byte - line 184: // TODO this could be aware of keyGroupPrefixBytes and write only one - line 193: // TODO this could be aware of keyGroupPrefixBytes and write only one - line 203: // TODO this could be aware of keyGroupPrefixBytes and write only one - line 219: // TODO this could be aware of keyGroupPrefixBytes and write only one byte if flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/type/SqlTypeFactoryImpl.java (6 lines): - line 345: // TODO: refine collation combination rules - line 380: // TODO: come up with a cleaner way to support - line 427: // TODO: only promote when required - line 450: // TODO: come up with a cleaner way to support - line 462: // TODO jvs 4-June-2005: This shouldn't be necessary; - line 471: // TODO: 
come up with a cleaner way to support flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/utils/DynamicPartitionPruningUtils.java (5 lines): - line 137: // TODO Let visitDimSide more efficient and more accurate. Like a filter on dim table or - line 229: // TODO adding more suitable filters which can filter enough partitions after - line 427: // TODO In the future, we need to support more operators to enrich matchable dpp - line 494: // TODO FLINK-28864 check if the source used by the DataStreamScanProvider is - line 500: // TODO supports more flink-runtime/src/main/java/org/apache/flink/runtime/state/metainfo/StateMetaInfoSnapshot.java (5 lines): - line 98: // TODO this will go away once all serializers have the restoreSerializer() factory method - line 112: * TODO this variant, which requires providing the serializers, TODO should actually be removed, - line 113: * leaving only {@link #StateMetaInfoSnapshot(String, BackendStateType, Map, Map)}. TODO This is - line 115: * meta TODO info subclasses), and will be removed once all serializers have the - line 171: /** TODO this method should be removed once the serializer map is removed. */ flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java (5 lines): - line 131: *
TODO: - line 1854: // TODO: ideally this only needs to be called if the result - line 1867: // TODO: Comments in RexShuttle.visitCall() mention other - line 2902: // TODO: create immutable copies of all maps - line 2928: // TODO: Multimap does not have well-defined equals behavior flink-table/flink-sql-jdbc-driver/src/main/java/org/apache/flink/table/jdbc/FlinkConnection.java (5 lines): - line 86: // TODO We currently do not support this, but we can't throw a SQLException here because we want - line 91: // TODO We currently do not support this, but we can't throw a SQLException here because we want - line 168: // TODO We currently do not support this, but we can't throw a SQLException here because we want - line 173: // TODO We currently do not support this, but we can't throw a SQLException here because we want - line 180: // TODO We currently do not support this, but we can't throw a SQLException here because we want flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/StreamNonDeterministicUpdatePlanVisitor.java (5 lines): - line 250: // TODO should optimize it after SinkUpsertMaterializer support upsertKey - line 498: // TODO remove this conversion when scala-free was total done. - line 525: // TODO remove this conversion when scala-free was total done. - line 584: // TODO remove this conversion when scala-free was total done. - line 595: // TODO remove this conversion when scala-free was total done. flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/SubQueryDecorrelator.java (4 lines): - line 316: // TODO supports correlation variable with OR - line 451: // TODO Currently, correlation in projection is not supported. 
- line 1001: // TODO does not allow correlation condition in its inputs now - line 1374: // TODO: create immutable copies of all maps flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManager.java (4 lines): - line 407: // TODO: Maybe it's also ok to skip this check in case that we cannot check the - line 1048: // TODO :: suggest old taskExecutor to stop itself - line 1187: // TODO :: suggest failed task executor to stop itself - line 1255: // TODO: Improve performance by having an index on the instanceId flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdColumnInterval.scala (4 lines): - line 290: // TODO supports ScalarSqlFunctions.IF - line 291: // TODO supports CAST - line 762: // TODO add more built-in agg functions - line 858: // TODO if column at index position is EuqiJoinKey in a Inner Join, its interval is flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecGroupTableAggregate.java (3 lines): - line 127: // TODO: heap state backend do not copy key currently, - line 129: // TODO: copy is not need when state backend is rocksdb, - line 131: // TODO: but other operators do not copy this input field..... flink-table/flink-table-common/src/main/java/org/apache/flink/table/utils/DateTimeUtils.java (3 lines): - line 517: // TODO use offset, better performance - line 1135: // TODO support it - line 1143: // TODO support it flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/operators/windowing/AsyncWindowOperator.java (3 lines): - line 159: * state is not mergeable. TODO: Not support currently. - line 165: * TODO: Not support currently. - line 614: // TODO: support merging window. 
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/legacy/metrics/MetricFetcherImpl.java (3 lines): - line 203: // TODO: Once the old code has been ditched, remove the explicit TaskManager query - line 205: // TODO: and return it as part of requestMetricQueryServiceAddresses. Moreover, - line 207: // TODO: we don't have to explicitly retain the valid TaskManagers, e.g. letting it flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/SqlNodeToOperationConversion.java (3 lines): - line 270: // TODO: all the below conversion logic should be migrated to SqlNodeConverters - line 706: // TODO calc target column list to index array, currently only simple SqlIdentifiers are - line 1277: // TODO calc target column list to index array, currently only simple SqlIdentifiers are flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMaster.java (3 lines): - line 596: // TODO: This method needs a leader session ID - line 633: // TODO: This method needs a leader session ID - line 1204: // TODO: Distinguish between job termination which should free all slots and a loss of flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogKeyedStateBackend.java (3 lines): - line 662: // TODO: Consider notifying nested state backend about checkpoint abortion (FLINK-25850) - line 882: // TODO: implement its own streamFactory. 
- line 947: // TODO: this method may change after the ownership PR flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java (3 lines): - line 226: // TODO: Fallback to checkpoint directory when checkpoint feature is ready if not - line 478: // TODO Can get filter's config in the future ForSt version, and build new filter use - line 644: // TODO: remove this method after ForSt deal log dir well flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceStreamTask.java (3 lines): - line 131: // TODO - we need to see how to derive those. We should probably not - line 133: // TODO - source's trigger message, but do a handshake in this task - line 135: // TODO - message from the master, and the source's trigger flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/StreamTaskStateInitializerImpl.java (3 lines): - line 514: // TODO currently this does not support local state recovery, so we expect there is only - line 556: // TODO currently this does not support local state recovery, so we expect there is only - line 664: stateName; // TODO since we only support a single named state in raw, this could be flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecGroupAggregate.java (3 lines): - line 194: // TODO: heap state backend do not copy key currently, - line 196: // TODO: copy is not need when state backend is rocksdb, - line 198: // TODO: but other operators do not copy this input field..... 
flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/RocksDBIncrementalCheckpointUtils.java (3 lines): - line 295: // TODO: change to null once this API is fixed - line 314: // TODO: change to null once this API is fixed - line 553: // TODO: consider using <= here to avoid that range delete tombstones of flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopDataInputStream.java (2 lines): - line 148: // TODO: Use org.apache.hadoop.fs.FSDataInputStream#read(ByteBuffer) to improve the - line 176: // TODO: Use org.apache.hadoop.fs.FSDataInputStream#read(long, ByteBuffer) to improve the flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/FlinkSubQueryRemoveRule.scala (2 lines): - line 63: // TODO supports ExistenceJoin - line 166: // TODO: flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStKeyedStateBackendBuilder.java (2 lines): - line 301: // TODO: Remove this after FLINK-37442, if we could properly handl the directory - line 430: // TODO: Support Restoring flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/RelExplainUtil.scala (2 lines): - line 150: // TODO Refactor local&global aggregate name - line 341: // TODO output local/global agg call names like Partial_XXX, Final_XXX flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/AggCallSelectivityEstimator.scala (2 lines): - line 79: /** Returns whether the given aggCall is supported now TODO supports more */ - line 382: // TODO: It is difficult to support binary comparisons for non-numeric type flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/table/lookup/fullcache/ReloadTriggerContext.java (2 lines): - line 41: // TODO add processingTime into FunctionContext - line 47: // TODO add watermarks into FunctionContext 
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java (2 lines): - line 304: // TODO rewrite based on operator id - line 819: // TODO rewrite based on operator id flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/expressions/converter/ExpressionConverter.java (2 lines): - line 158: // TODO planner supports only milliseconds precision - line 168: // TODO type factory strips the precision, for literals we can be more lenient flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecMatch.java (2 lines): - line 137: // TODO remove this once it is supported in CEP library - line 143: // TODO remove this once it is supported in CEP library flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecIntervalJoin.java (2 lines): - line 370: // TODO: add async version procJoinFunc to use AsyncKeyedCoProcessOperator - line 407: // TODO: add async version rowJoinFunc to use AsyncKeyedCoProcessOperator flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/PartitionLoader.java (2 lines): - line 49: *
TODO: src and dest may be on different FS. - line 162: // TODO: We need move to trash when auto-purge is false. flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java (2 lines): - line 418: * TODO: NOTE: This method does a lot of work caching / retrieving states just to update the - line 480: // TODO remove this once heap-based timers are working with RocksDB incremental snapshots! flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfoHeaders.java (2 lines): - line 34: // TODO this REST path is inappropriately set due to legacy design reasons, and ideally should - line 36: // TODO changing it would require corresponding path changes in flink-runtime-web flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkSemiAntiJoinJoinTransposeRule.java (2 lines): - line 85: // TODO support other join type - line 101: // TODO currently we does not handle this flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/SplitAggregateRule.scala (2 lines): - line 311: // TODO reuse aggregate function, see FLINK-22412 - line 355: // TODO reuse aggregate function, see FLINK-22412 flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/JoinDependentConditionDerivationRule.scala (2 lines): - line 59: // TODO supports more join type - line 103: // TODO Consider whether it is worth doing a filter if we have histogram. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/LookupJoinCodeGenerator.scala (2 lines): - line 184: // TODO: filter all records when there is any nulls on the join key, because - line 396: // TODO we should update code splitter's grammar file to accept lambda expressions. 
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/utils/LookupJoinUtil.java (2 lines): - line 658: // TODO: support nested lookup keys in the future, - line 735: // TODO support SourceFunctions flink-filesystems/flink-azure-fs-hadoop/src/main/java/org/apache/flink/fs/azurefs/AzureBlobFsRecoverableDataOutputStream.java (2 lines): - line 151: // 2 MB buffers. TODO : Make this configurable - line 174: // TODO : Support intermediate flush? flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java (2 lines): - line 79: *
TODO this map should be moved to a base class once we have proper hierarchy for the - line 284: // TODO with eager registration in place, these checks should be moved to restore() flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/sync/ForStSyncMapState.java (2 lines): - line 333: // TODO make KvStateSerializer key-group aware to save this round trip and key-group - line 773: // TODO: optimization here could be to work with slices and not byte arrays tools/ci/flink-ci-tools/src/main/java/org/apache/flink/tools/ci/licensecheck/NoticeFileChecker.java (2 lines): - line 131: // TODO: this doesn't work for modules requiring a NOTICE that are bundled indirectly - line 132: // TODO: via another non-deployed module flink-core/src/main/java/org/apache/flink/configuration/CheckpointingOptions.java (2 lines): - line 364: *
TODO: remove '@Documentation.ExcludeFromDocumentation' after the feature is implemented. - line 604: // TODO: deprecated flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/LongHashJoinGenerator.scala (2 lines): - line 58: // TODO decimal and multiKeys support. - line 59: // TODO All HashJoinType support. flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/RocksDBMapState.java (2 lines): - line 341: // TODO make KvStateSerializer key-group aware to save this round trip and key-group - line 782: // TODO: optimization here could be to work with slices and not byte arrays flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/AggFunctionFactory.scala (2 lines): - line 85: // TODO supports CountDistinctAggFunction - line 145: // TODO supports SqlCardinalityCountAggFunction flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/SubtaskFileMergingManagerRestoreOperation.java (2 lines): - line 92: // TODO support channel state restore for unaligned checkpoint. - line 113: // TODO support changelog keyed state handle flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/hashtable/BinaryHashTable.java (2 lines): - line 213: // TODO: combine key projection and build side conversion to code gen. - line 771: // TODO do null filter in advance? flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/GenerateUtils.scala (2 lines): - line 160: // TODO: should we also consider other types? - line 640: // TODO support MULTISET and MAP? 
flink-table/flink-sql-jdbc-driver/src/main/java/org/apache/flink/table/jdbc/FlinkStatement.java (2 lines): - line 94: // TODO We currently do not support this, but we can't throw a SQLException here because we want - line 101: // TODO We currently do not support this, but we can't throw a SQLException here because we want flink-table/flink-table-common/src/main/java/org/apache/flink/table/legacy/descriptors/Rowtime.java (2 lines): - line 40: // TODO: Put these fields into RowtimeValidator once it is also ported into table-common. - line 41: // TODO: Because these fields have polluted this API class. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/MiniBatchIntervalInferRule.scala (2 lines): - line 72: // TODO introduce mini-batch window aggregate later - line 136: // TODO: if it is ProcTime mode, we also append a minibatch node for now. flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/service/SqlGatewayServiceImpl.java (2 lines): - line 101: // TODO: support the feature in FLINK-27838 - line 210: // TODO: support the feature in FLINK-27838 flink-python/src/main/java/org/apache/beam/runners/fnexecution/state/GrpcStateService.java (2 lines): - line 107: // TODO: Abort in-flight state requests. Flag this processBundleInstructionId as a fail. - line 116: *
TODO: Handle when the client indicates completion or an error on the inbound stream and flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/rank/AbstractTopNFunction.java (2 lines): - line 50: *
TODO: move util methods and metrics to AbstractTopNHelper after using AbstractTopNHelper in - line 117: // TODO support RANK and DENSE_RANK flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/FlinkRexUtil.scala (2 lines): - line 80: // TODO: to avoid FLINK-37280, require mini batch if ROW_NUMBER is used - line 191: // TODO use more efficient solution to get number of RexCall in CNF node flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java (2 lines): - line 127: * default. TODO: fallback to checkpoint directory if not configured. - line 458: // TODO: remove after support more snapshot strategy flink-datastream/src/main/java/org/apache/flink/datastream/impl/ExecutionEnvironmentImpl.java (2 lines): - line 288: // TODO Supports accumulator. - line 306: // TODO Supports job listeners. flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecExchange.java (2 lines): - line 56: *
TODO Remove this class once FLINK-21224 is finished. - line 109: // TODO Eliminate duplicate keys flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/ByteBufferReadableFSDataInputStream.java (2 lines): - line 69: * accessed by one thread at a time. TODO: Rename all methods about 'readFully' to 'read' when - line 92: * be concurrently accessed by multiple threads. TODO: Support to split this method to other flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/ByteBufferWritableFSDataOutputStream.java (2 lines): - line 123: // TODO: 1. Support to close when sync for some object storages. - line 125: // TODO: 2. Support to handle specific files, e.g. MANIFEST, LOG. flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecGroupWindowAggregate.java (2 lines): - line 397: // TODO: EventTimeTumblingGroupWindow should sort the stream on event time - line 417: // TODO: EventTimeTumblingGroupWindow should sort the stream on event time flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/filemerging/FileMergingSnapshotManagerBase.java (2 lines): - line 385: // TODO: FLINK-32091 add io metrics - line 771: // TODO: Determine whether do file sync more wisely. Add an interface to FileSystem if flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/common/CommonLookupJoinRule.scala (2 lines): - line 95: // TODO: find TableSource in FlinkLogicalIntermediateTableScan - line 100: // TODO Support `IS NOT DISTINCT FROM` in the future: FLINK-13509 flink-python/src/main/java/org/apache/beam/runners/fnexecution/control/DefaultJobBundleFactory.java (2 lines): - line 513: // TODO: Consider having BundleProcessor#newBundle take in an OutputReceiverFactory - line 702: // TODO: Wait for executor shutdown? 
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalHashAggregateBase.scala (2 lines): - line 74: // TODO use BytesHashMap.BUCKET_SIZE instead of 16 - line 76: // TODO use BytesHashMap.RECORD_EXTRA_LENGTH instead of 8 flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecPythonGroupWindowAggregate.java (2 lines): - line 338: // TODO: EventTimeTumblingGroupWindow should sort the stream on event time - line 361: // TODO: EventTimeTumblingGroupWindow should sort the stream on event time flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutor.java (2 lines): - line 802: // TODO: Pass config value from user program and do overriding here. - line 1196: // TODO: Filter invalid requests from the resource manager by using the flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/hashtable/LongHybridHashTable.java (2 lines): - line 50: *
See {@link LongHashPartition}. TODO add min max long filter and bloomFilter to spilled - line 285: // TODO MemoryManager needs to support flexible larger segment, so that the index area flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/snapshot/RocksDBFullSnapshotResources.java (2 lines): - line 94: // TODO: better yet, we can do it outside the constructor - line 101: // TODO: was it important that this is a LinkedHashMap flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/temporal/TemporalRowTimeJoinOperator.java (2 lines): - line 104: *
TODO: this could be OrderedMultiMap[Jlong, Row] indexed by row's timestamp, to avoid full - line 112: *
TODO: having `rightState` as an OrderedMapState would allow us to avoid sorting cost once flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalJoinRuleBase.scala (2 lines): - line 123: // TODO currently join hint is not supported with semi/anti join - line 191: // TODO currently join hint is not supported with semi/anti join flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/connectors/DynamicSinkUtils.java (2 lines): - line 852: // TODO FLINK-33083 we should not ignore the produced abilities but actually put those into - line 900: // TODO erase the conversion class earlier when dropping legacy code, esp. FLINK-22321 flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/FlinkRelMdUtil.scala (2 lines): - line 675: // TODO reuse FlinkRelMetadataQuery here - line 699: // TODO It's hard to make sure that the normalized key's length is accurate in optimized stage. flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/InputProperty.java (1 line): - line 269: *
TODO This class can be removed once FLINK-21224 is finished. flink-runtime/src/main/java/org/apache/flink/runtime/state/KeyGroupsStateHandle.java (1 line): - line 144: // TODO: for now this ignores that only some key groups might be accessed when reading the flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/table/lookup/CachingAsyncLookupFunction.java (1 line): - line 96: // TODO: Should implement retry on failure logic as proposed in flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessage.java (1 line): - line 661: // TODO Directly serialize to Netty's buffer flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedApproximateSubpartition.java (1 line): - line 57: * of time then multiple netty worker threads can createReadView at the same time. TODO: This flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/frame/OverWindowFrame.java (1 line): - line 59: * Get next row from iterator. Return null if iterator has no next. TODO Maybe copy is repeated. 
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/calcite/PreValidateReWriter.scala (1 line): - line 287: // TODO no suitable error message from current CalciteResource, just use this one temporarily, flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/collect/CollectSinkFunction.java (1 line): - line 272: // TODO this implementation is not very effective, flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/DefaultSlotStatusSyncer.java (1 line): - line 192: // TODO If the taskManager does not have enough resource, we flink-formats/flink-orc/src/main/java/org/apache/flink/orc/OrcColumnarRowInputFormat.java (1 line): - line 155: // TODO FLINK-25113 all this partition keys code should be pruned from the orc format, flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/batch/BatchExecOverAggregate.java (1 line): - line 132: // TODO just replace comparator to equaliser flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/hashtable/LongHashPartition.java (1 line): - line 296: // TODO test Conflict resolution: flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java (1 line): - line 548: // TODO: adopt a more conventional definition/instance structure flink-runtime/src/main/java/org/apache/flink/runtime/state/StatePartitionStreamProvider.java (1 line): - line 32: *
TODO use bounded stream that fail fast if the limit is exceeded on corrupted reads. flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/StreamOperatorStateHandler.java (1 line): - line 433: TODO: NOTE: This method does a lot of work caching / retrieving states just to update the namespace. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/optimize/program/FlinkChangelogModeInferenceProgram.scala (1 line): - line 246: // TODO support early / late fire and then this node may produce update records flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/optimize/program/FlinkRecomputeStatisticsProgram.java (1 line): - line 124: // TODO estimate statistics by selectivity flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/table/lookup/fullcache/CacheLoader.java (1 line): - line 100: // TODO support metric numCachedBytesGauge flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperatorV2.java (1 line): - line 276: *
TODO: this method can be removed once all timers are moved to be managed by state flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStKeyedStateBackend.java (1 line): - line 476: // TODO: 2024/6/6 wangfeifan - Implement state migration flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/job/metrics/MetricsAggregationParameter.java (1 line): - line 27: /** TODO: add javadoc. */ flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/typeutils/RowDataSerializer.java (1 line): - line 188: /** Convert {@link RowData} into {@link BinaryRowData}. TODO modify it to code gen. */ flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/hint/FlinkHintStrategies.java (1 line): - line 96: // TODO semi/anti join with CORRELATE is not supported flink-core/src/main/java/org/apache/flink/api/common/io/BinaryInputFormat.java (1 line): - line 229: // TODO: seek not supported by compressed streams. Will throw exception flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/deduplicate/utils/DeduplicateFunctionHelper.java (1 line): - line 32: *
TODO utilize the respective helper classes that inherit from an abstract deduplicate function flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/RestartPipelinedRegionFailoverStrategy.java (1 line): - line 116: // TODO: show the task name in the log flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/SynchronousBufferFileReader.java (1 line): - line 31: *
TODO Refactor I/O manager setup and refactor this into it flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/processor/ForwardHashExchangeProcessor.java (1 line): - line 171: // TODO This implementation should be updated once FLINK-21224 is finished. flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/RocksDBPriorityQueueSetFactory.java (1 line): - line 194: // TODO we implement the simple way of supporting the current functionality, mimicking flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/typeutils/LegacyLocalDateTimeTypeInfo.java (1 line): - line 35: * TODO: https://issues.apache.org/jira/browse/FLINK-14927 flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalSinkRule.scala (1 line): - line 69: // TODO This option is hardcoded to remove the dependency of planner from flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalOverAggregateRule.scala (1 line): - line 115: // TODO: split pandas udaf, general python udaf, java/scala udaf into different node flink-clients/src/main/java/org/apache/flink/client/cli/CheckpointOptions.java (1 line): - line 37: // TODO: Support type INCREMENTAL flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/UpdatingPlanChecker.scala (1 line): - line 33: // TODO UpsertStreamTableSink setKeyFields interface should be Array[Array[String]] flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/table/lookup/CachingLookupFunction.java (1 line): - line 165: // TODO: Should implement retry on failure logic as proposed in FLIP-234 flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/ExecutorImpl.java (1 line): - line 569: // TODO: Remove this after the REST Client should allow 
flink-core/src/main/java/org/apache/flink/core/memory/DataInputDeserializer.java (1 line): - line 72: // TODO: FLINK-8585 handle readonly and other non array based buffers more efficiently flink-runtime/src/main/java/org/apache/flink/runtime/state/restore/FullSnapshotRestoreOperation.java (1 line): - line 305: // TODO this could be aware of keyGroupPrefixBytes and write only flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/RocksDBMemoryConfiguration.java (1 line): - line 194: // TODO change the formula once FLINK-15532 resolved. flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkProjectJoinTransposeRule.java (1 line): - line 75: return; // TODO: support SEMI/ANTI join later flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/FlinkLogicalRankRule.scala (1 line): - line 70: // TODO use the field name specified by user flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalLegacySinkRule.scala (1 line): - line 52: // TODO This option is hardcoded to remove the dependency of planner from flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/workflow/scheduler/EmbeddedQuartzScheduler.java (1 line): - line 344: // TODO wait for the job to finish flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/StreamOperatorFactoryUtil.java (1 line): - line 80: // TODO: what to do with ProcessingTimeServiceAware? flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetVectorizedInputFormat.java (1 line): - line 236: // TODO clip for array,map,row types. 
flink-formats/flink-orc-nohive/src/main/java/org/apache/flink/orc/nohive/OrcNoHiveColumnarRowInputFormat.java (1 line): - line 66: // TODO FLINK-25113 all this partition keys code should be pruned from the orc format, flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/internal/TableEnvironmentImpl.java (1 line): - line 363: // TODO should add a validation, while StreamTableSource is in flink-table-api-java-bridge flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/StreamPhysicalLegacySinkRule.scala (1 line): - line 51: // TODO This option is hardcoded to remove the dependency of planner from flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/ProjectWatermarkAssignerTransposeRule.java (1 line): - line 87: // TODO: support nested projection push down in transpose flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/AuxiliaryConverter.java (1 line): - line 63: // case SESSION_END: // TODO: ? flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AsyncExecutionController.java (1 line): - line 528: // There must be a bug. TODO: Print necessary debug info in this case. flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSourceReader.java (1 line): - line 219: // TODO: track previous readers splits till checkpoint flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/aggfunctions/LeadLagAggFunction.java (1 line): - line 95: // TODO hack, use the current input reset the buffer value. flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/hint/StateTtlHint.java (1 line): - line 36: *
TODO support agg state ttl hint. flink-python/pyflink/table/udf.py (1 line): - line 601: # TODO: support to configure the python execution environment flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/KeyGroupPartitionedPriorityQueue.java (1 line): - line 165: // TODO consider bulk loading the partitions and "heapify" keyGroupHeap once after all flink-runtime/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java (1 line): - line 649: // TODO: could this logic be moved to the user of #setTransitiveChainedTaskConfigs() ? flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/SourceOperatorStreamTask.java (1 line): - line 104: // TODO: should the input be constructed inside the `OperatorChain` class? flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/listener/CatalogContext.java (1 line): - line 76: * TODO After https://issues.apache.org/jira/browse/FLINK-32427 is finished, we can get flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/NonTimeRangeUnboundedPrecedingFunction.java (1 line): - line 395: // TODO: Optimize by using Binary Search flink-core/src/main/java/org/apache/flink/core/execution/DefaultExecutorServiceLoader.java (1 line): - line 44: // TODO: This code is almost identical to the ClusterClientServiceLoader and its default flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecLookupJoin.java (1 line): - line 213: // TODO: refactor this into TableSourceTable, once legacy TableSource is removed flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/RoundRobinOperatorStateRepartitioner.java (1 line): - line 87: // TODO: Currently if some tasks is finished, we would rescale the flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalHashJoin.scala (1 line): - line 106: // TODO use BinaryHashBucketArea.RECORD_BYTES instead of 
8 flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/StreamPhysicalSinkRule.scala (1 line): - line 68: // TODO This option is hardcoded to remove the dependency of planner from flink-runtime/src/main/java/org/apache/flink/runtime/state/NonClosingCheckpointOutputStream.java (1 line): - line 67: // TODO if we want to support async writes, this call could trigger a callback to the flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/RowTimeRangeBoundedPrecedingFunction.java (1 line): - line 189: // TODO Use timer with namespace to distinguish timers flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/sort/LimitOperator.java (1 line): - line 26: /** Operator for batch limit. TODO support stopEarly. */ flink-runtime/src/main/java/org/apache/flink/runtime/state/KeyedStateBackend.java (1 line): - line 121: *
TODO: NOTE: This method does a lot of work caching / retrieving states just to update the flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/lookup/KeyedLookupJoinWrapper.java (1 line): - line 61: // TODO to be unified by FLINK-24666 flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/InternalDataStreamQueryOperation.java (1 line): - line 55: // TODO remove this while ResolvedSchema supports fieldNullables flink-yarn/src/main/java/org/apache/flink/yarn/YarnClusterDescriptor.java (1 line): - line 1031: // TODO: server use user main method to generate job graph flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueue.java (1 line): - line 171: // TODO implement shrinking as well? flink-core/src/main/java/org/apache/flink/core/fs/local/LocalRecoverableFsDataOutputStream.java (1 line): - line 199: // TODO how to handle this? flink-datastream/src/main/java/org/apache/flink/datastream/impl/operators/BaseKeyedTwoOutputProcessOperator.java (1 line): - line 41: // TODO Restore this keySet when task initialized from checkpoint. flink-core/src/main/java/org/apache/flink/core/plugin/DirectoryBasedPluginFinder.java (1 line): - line 90: // TODO: This class could be extended to parse exclude-pattern from a optional text files in flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdDistribution.scala (1 line): - line 66: // FIXME transmit one possible distribution. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/common/CommonCalc.scala (1 line): - line 59: // TODO use inputRowCnt to compute cpu cost flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecExchange.java (1 line): - line 34: *
TODO Remove this class once its functionality is replaced by ExecEdge. flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/table/lookup/fullcache/LookupFullCache.java (1 line): - line 73: // TODO add Configuration into FunctionContext and pass in into LookupFullCache flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/common/CommonPhysicalLookupJoin.scala (1 line): - line 85: // TODO: refactor this into TableSourceTable, once legacy TableSource is removed flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/ProcTimeRangeBoundedPrecedingFunction.java (1 line): - line 145: // TODO Use timer with namespace to distinguish timers flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/functions/utils/UserDefinedFunctionUtils.scala (1 line): - line 80: // TODO it is not a good way to check singleton. Maybe improve it further. flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStDBMapKeyIterRequest.java (1 line): - line 60: // TODO: Determine whether this is from sync mode, and if so, filter the entries with flink-runtime/src/main/java/org/apache/flink/runtime/state/AsyncKeyedStateBackend.java (1 line): - line 116: // TODO remove this once heap-based timers are working with ForSt incremental snapshots! flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopRecoverableFsDataOutputStream.java (1 line): - line 289: // TODO how to handle this? flink-runtime/src/main/java/org/apache/flink/runtime/state/ChangelogTaskLocalStateStore.java (1 line): - line 168: // TODO: This is guaranteed by the wrapped backend only using this folder for flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/StateRequestType.java (1 line): - line 35: *
TODO: Serialization and Deserialization. flink-core/src/main/java/org/apache/flink/core/memory/MemorySegmentFactory.java (1 line): - line 139: // TODO: this error handling can be removed in future, flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/InternalTimerServiceAsyncImpl.java (1 line): - line 124: * state-processor-API. TODO: Ensure state-processor-API that only uses sync state API. flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/typeutils/LegacyTimestampTypeInfo.java (1 line): - line 35: * TODO: https://issues.apache.org/jira/browse/FLINK-14927 flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/EqualiserCodeGenerator.scala (1 line): - line 158: // TODO merge ScalarOperatorGens.generateEquals. flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java (1 line): - line 1386: // TODO For some tests this could be a problem when querying too early if all flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/service/materializedtable/MaterializedTableManager.java (1 line): - line 551: // TODO: Set minibatch related optimization options. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalWindowAggregateRule.scala (1 line): - line 160: // TODO aggregate include projection now, so do not provide new trait will be safe flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/InternalTimeServiceManager.java (1 line): - line 85: *
TODO: This can be removed once heap-based timers are integrated with RocksDB flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/AggregateUtil.scala (1 line): - line 945: // TODO support adaptive local hash agg while agg call with filter condition. flink-runtime/src/main/java/org/apache/flink/streaming/runtime/io/StreamTwoInputProcessorFactory.java (1 line): - line 118: // TODO: extract method flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermarkstatus/HeapPriorityQueue.java (1 line): - line 238: // TODO implement shrinking as well? flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptivebatch/AdaptiveBatchScheduler.java (1 line): - line 684: // TODO: In the future, if we want to load balance for job vertices whose flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/PullUpWindowTableFunctionIntoWindowAggregateRule.scala (1 line): - line 69: // TODO: this can be supported in the future by referencing them as a RexFieldVariable flink-runtime/src/main/java/org/apache/flink/runtime/blob/TransientBlobCache.java (1 line): - line 47: *
TODO: make this truly transient by returning file streams to a local copy with the remote flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/ExpandUtil.scala (1 line): - line 199: // TODO only need output duplicate fields for the row against 'regular' aggregates flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/collect/CollectResultFetcher.java (1 line): - line 248: // TODO a more proper retry strategy? flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/FunctionCodeGenerator.scala (1 line): - line 138: // TODO more functions flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/traces/otel/OpenTelemetryTraceReporter.java (1 line): - line 100: // TODO: not yet supported flink-runtime-web/web-dashboard/src/app/pages/job/checkpoints/subtask/job-checkpoints-subtask.component.ts (1 line): - line 54: // FIXME This type-asserts that pre / next are a specific subtype. flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorEndpoint.java (1 line): - line 908: // TODO: Remove once the Yarn proxy can forward all REST verbs flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java (1 line): - line 258: // TODO The job-submission web interface passes empty args (and thus empty flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalLimitRule.scala (1 line): - line 78: rexBuilder.makeLiteral(limit, intType, true), // TODO use Long type for limit ? flink-runtime-web/web-dashboard/src/app/pages/job/timeline/job-timeline.component.ts (1 line): - line 167: // FIXME scrollIntoViewIfNeeded is a non-standard extension and will not work everywhere flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/NonBufferOverWindowOperator.java (1 line): - line 113: // TODO Reform AggsHandleFunction.getValue instead of use JoinedRowData. 
Multilayer flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalSortAggRule.scala (1 line): - line 88: // TODO aggregate include projection now, so do not provide new trait will be safe flink-python/pyflink/fn_execution/datastream/process/timerservice_impl.py (1 line): - line 39: TODO: Use InternalTimerServiceImpl instead. flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/utils/LogicalTypeCasts.java (1 line): - line 608: || // TODO drop this line once we remove legacy types flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/sql/FlinkSqlOperatorTable.java (1 line): - line 852: // TODO: the return type of TO_TIMESTAMP should be TIMESTAMP(9), flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/window/utils/WindowJoinHelper.java (1 line): - line 199: *
TODO FLINK-37106 consider extracting common methods in different WindowJoinProcessor flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/calls/FunctionGenerator.scala (1 line): - line 284: // TODO: fixme if CALCITE-3199 fixed flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/command/DescribeJobOperation.java (1 line): - line 47: // TODO: We may need to migrate the execution for ShowJobsOperation from SQL Gateway flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdColumnUniqueness.scala (1 line): - line 504: // TODO get uniqueKeys from TableSchema of TableSource flink-runtime/src/main/java/org/apache/flink/runtime/state/KeyedBackendSerializationProxy.java (1 line): - line 62: // TODO allow for more (user defined) compression formats + backwards compatibility story. flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/aggregate/window/processors/UnsliceSyncStateWindowAggProcessor.java (1 line): - line 104: // TODO support allowedLateness flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/SchedulerBase.java (1 line): - line 913: // TODO: Introduce a more general solution to mark times when flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetColumnarRowInputFormat.java (1 line): - line 165: // TODO FLINK-25113 all this partition keys code should be pruned from the parquet format, flink-formats/flink-orc-nohive/src/main/java/org/apache/flink/orc/nohive/shim/OrcNoHiveShim.java (1 line): - line 73: // TODO configure filters flink-formats/flink-hadoop-bulk/src/main/java/org/apache/flink/formats/hadoop/bulk/committer/HadoopRenameFileCommitter.java (1 line): - line 94: // TODO: in the future we may also need to check if the target file exists. 
flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/internal/StaticResultProvider.java (1 line): - line 59: // TODO: This is a temporary solution, the long-term solution is to use flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/DataTypeFactoryImpl.java (1 line): - line 162: // TODO validate implementation class of structured types when converting from LogicalType flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/restore/RocksDBHandle.java (1 line): - line 191: // TODO with eager state registration in place, check here for serializer migration flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/utils/RankProcessStrategy.java (1 line): - line 198: // TODO: choose a set of primary key flink-table/flink-sql-jdbc-driver/src/main/java/org/apache/flink/table/jdbc/FlinkDatabaseMetaData.java (1 line): - line 113: // TODO Flink will support SHOW DATABASES LIKE statement in FLIP-297, this method will be flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/ParquetColumnarRowSplitReader.java (1 line): - line 190: // TODO clip for array,map,row types. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/calls/StringCallGen.scala (1 line): - line 42: *
TODO Need to rewrite most of the methods here, calculated directly on the StringData instead flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/lookup/AsyncLookupJoinRunner.java (1 line): - line 188: *
TODO: code generate a whole JoinedRowResultFuture in the future flink-runtime/src/main/java/org/apache/flink/runtime/deployment/TaskDeploymentDescriptorFactory.java (1 line): - line 143: // TODO Refactor after removing the consumers from the intermediate result partitions flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapSnapshotStrategy.java (1 line): - line 111: // TODO: this code assumes that writing a serializer is threadsafe, we flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/AbstractMergeIterator.java (1 line): - line 165: // TODO: Decide which side to spill and which to block! flink-table/flink-table-api-scala/src/main/scala/org/apache/flink/table/api/codegen/TypeAnalyzer.scala (1 line): - line 385: // TODO: use this once 2.10 is no longer supported flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptivebatch/AdaptiveExecutionHandlerFactory.java (1 line): - line 39: *
TODO: Currently, adaptive execution cannot work with batch job progress recovery, so we flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransfer.java (1 line): - line 66: // TODO: Add ConfigOption replace this field after ForSt checkpoint implementation stable flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/agg/batch/HashWindowCodeGenerator.scala (1 line): - line 530: // TODO refine this. Is it possible to reuse grouping key projection? flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/ColumnIntervalUtil.scala (1 line): - line 165: // TODO add more case flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/collect/CollectCoordinationResponse.java (1 line): - line 76: // TODO the following two methods might be not so efficient flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/typeutils/LegacyInstantTypeInfo.java (1 line): - line 35: * TODO: https://issues.apache.org/jira/browse/FLINK-14927 flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java (1 line): - line 1499: // TODO: That we still have to call something like this is a crime against humanity flink-core/src/main/java/org/apache/flink/util/CompressionUtils.java (1 line): - line 243: // TODO: support setting the permission without following links flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/ExecEdge.java (1 line): - line 65: // TODO once FLINK-21224 [Remove BatchExecExchange and StreamExecExchange, and replace their flink-table/flink-table-common/src/main/java/org/apache/flink/table/dataview/NullSerializer.java (1 line): - line 34: // TODO move this class to org.apache.flink.table.runtime.typeutils flink-runtime/src/main/java/org/apache/flink/streaming/runtime/io/StreamMultipleInputProcessor.java (1 line): - line 150: // TODO: isAvailable() can be a 
costly operation (checking volatile). If one of flink-python/src/main/java/org/apache/flink/table/runtime/typeutils/serializers/python/RowDataSerializer.java (1 line): - line 88: // TODO: support RowData natively in Python, then we can eliminate the redundant flink-core-api/src/main/java/org/apache/flink/api/common/state/AppendingState.java (1 line): - line 61: *
If null is passed in, the behaviour is undefined (implementation related). TODO: An flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/RocksDBMemoryControllerUtils.java (1 line): - line 117: // TODO use strict capacity limit until FLINK-15532 resolved flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStHandle.java (1 line): - line 165: // TODO: with eager state registration in place, check here for serializer migration flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/JoinUtil.scala (1 line): - line 125: // TODO create NonEquiJoinInfo directly flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/context/ExecutionContext.java (1 line): - line 30: // TODO add create state method. flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java (1 line): - line 321: *
TODO: this method can be removed once all timers are moved to be managed by state flink-runtime/src/main/java/org/apache/flink/runtime/state/KeyedStateCheckpointOutputStream.java (1 line): - line 58: // TODO if we want to support async writes, this call could trigger a callback to the flink-runtime/src/main/java/org/apache/flink/runtime/state/StateSnapshotRestore.java (1 line): - line 25: /** Interface to deal with state snapshot and restore of state. TODO find better name? */ flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/AbstractStateIterator.java (1 line): - line 131: // TODO optimization: Avoid results copy. flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/sync/ForStDBPriorityQueueSetFactory.java (1 line): - line 193: // TODO we implement the simple way of supporting the current functionality, mimicking flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointFailureManager.java (1 line): - line 150: // TODO: Try to get rid of checking nullability of executionAttemptID because false value of flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/StreamingRuntimeContext.java (1 line): - line 256: // TODO: Reconstruct this after StateManager is ready in FLIP-410. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/sort/SortCodeGenerator.scala (1 line): - line 453: // TODO: support normalize key for non-compact timestamp flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdSelectivity.scala (1 line): - line 272: // TODO only effects BatchPhysicalRel instead of all RelNode now flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java (1 line): - line 297: /** TODO it might be replaced by the global IO executor on TaskManager level future. 
*/ flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/typeutils/LegacyDataViewUtils.scala (1 line): - line 139: // TODO supports SortedMapView flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/batch/BatchExecHashJoin.java (1 line): - line 263: // TODO decimal and multiKeys support and all HashJoinType support. flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/embedded/AbstractEmbeddedPythonFunctionOperator.java (1 line): - line 150: // TODO: Support batches invoking. flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/RocksDBResourceContainer.java (1 line): - line 302: // TODO Can get filter's config in the future RocksDB version, and build new filter use flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/processor/MultipleInputNodeCreationProcessor.java (1 line): - line 415: // TODO If all kinds of one input operator support OFCG, we can remove this flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/RocksDBKeyedStateBackend.java (1 line): - line 1081: // TODO maybe filterOrTransform only for k/v states flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvToRowDataConverters.java (1 line): - line 241: // TODO: FLINK-17525 support millisecond and nanosecond flink-python/pyflink/fn_execution/table/state_data_view.py (1 line): - line 35: # TODO: infer the coder from the input types and output type of the built-in functions flink-runtime/src/main/java/org/apache/flink/runtime/blob/TransientBlobService.java (1 line): - line 38: *
TODO: change API to not rely on local files but return {@link InputStream} objects flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/filemapping/FileMappingManager.java (1 line): - line 290: // TODO: Reconsider the directory deletion strategy in FLINK-37442. flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistryImpl.java (1 line): - line 252: // TODO This is a temporary fix for a problem during flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/sync/ForStSyncKeyedStateBackendBuilder.java (1 line): - line 576: // TODO: Support Restoring flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/command/ShowJobsOperation.java (1 line): - line 36: // TODO: We may need to migrate the execution for ShowJobsOperation from SQL Gateway flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/connectors/DynamicSourceUtils.java (1 line): - line 185: // TODO: isUpsertSource(), isSourceChangeEventsDuplicate() flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkAggregateExpandDistinctAggregatesRule.java (1 line): - line 538: // TODO supports more aggCalls (currently only supports COUNT) flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/sync/ForStSyncKeyedStateBackend.java (1 line): - line 920: // TODO maybe filterOrTransform only for k/v states flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueue.java (1 line): - line 91: // TODO This could potentially have a bad performance impact as in the flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/processor/AdaptiveJoinProcessor.java (1 line): - line 173: // TODO: If job recovery for adaptive execution is supported in the future, this logic will 
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumerator.java (1 line): - line 360: // TODO: not start enumerator until readers are ready? flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/BufferDataOverWindowOperator.java (1 line): - line 138: // TODO Reform AggsHandleFunction.getValue instead of use JoinedRowData. Multilayer flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/aggregate/window/processors/WindowAggProcessorBase.java (1 line): - line 114: *
TODO support early fire / late file to produce changelog result. flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/ExecutionFailureHandler.java (1 line): - line 169: // TODO: replace with reporting as event once events are supported. flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStIncrementalCheckpointUtils.java (1 line): - line 364: // TODO: consider using <= here to avoid that range delete tombstones of flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkFilterJoinRule.java (1 line): - line 137: // TODO - add logic to derive additional filters. E.g., from flink-datastream/src/main/java/org/apache/flink/datastream/impl/operators/KeyedTwoInputBroadcastProcessOperator.java (1 line): - line 50: // TODO Restore this keySet when task initialized from checkpoint. flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecTableSourceScan.java (1 line): - line 177: // TODO: Push down watermark strategy to source scan flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdSize.scala (1 line): - line 433: // TODO after time/date => int, timestamp => long, this estimate value should update flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/DefaultSlotPoolServiceSchedulerFactory.java (1 line): - line 173: // TODO: It will be assigned by the corresponding logic after flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/calcite/FlinkPlannerImpl.scala (1 line): - line 226: // TODO currently, it is a relatively hacked way to tell converter flink-datastream/src/main/java/org/apache/flink/datastream/impl/operators/BaseKeyedTwoInputNonBroadcastProcessOperator.java (1 line): - line 42: // TODO Restore this keySet when task initialized from checkpoint. 
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/frame/OffsetOverFrame.java (1 line): - line 109: // TODO refactor it. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/calls/ScalarOperatorGens.scala (1 line): - line 338: // TODO: in flip-154, we should support implicit type conversion between (var)char and numeric, flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java (1 line): - line 375: // TODO: Add sampling for unsplittable files. Right now, only compressed text files are flink-core/src/main/java/org/apache/flink/core/state/StateFutureImpl.java (1 line): - line 35: * version of this implementation, we wrap {@link CompletableFuture} for simplification. TODO: flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/serde/RexNodeJsonDeserializer.java (1 line): - line 228: // TODO: Since 1.18.0 nothing is serialized to FIELD_NAME_CONTAINS_NULL. flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/FlinkAggregateRemoveRule.java (1 line): - line 81: // TODO supports more AggregateCalls flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java (1 line): - line 194: // TODO change the formula once FLINK-15532 resolved. flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/tier/remote/RemoteTierMasterAgent.java (1 line): - line 77: // TODO we could list the remote path to get all result partitions. Currently, this method flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/job/JobDetailsInfo.java (1 line): - line 74: // TODO: For what do we need this??? 
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/sync/AbstractForStSyncState.java (1 line): - line 132: // TODO make KvStateSerializer key-group aware to save this round trip and key-group flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/AbstractRocksDBState.java (1 line): - line 136: // TODO make KvStateSerializer key-group aware to save this round trip and key-group flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdModifiedMonotonicity.scala (1 line): - line 585: // TODO supports temporal table function join flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/AdaptiveScheduler.java (1 line): - line 1485: // TODO: replace with reporting as event once events are supported. flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/AbstractFileSource.java (1 line): - line 143: // TODO - in the next cleanup pass, we should try to remove the need to "wrap unchecked" flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/types/PlannerTypeUtils.java (1 line): - line 152: // TODO: add other precision types here in the future flink-runtime/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java (1 line): - line 144: // TODO Currently, we construct a new thread pool for the compilation of each job. In the flink-datastream/src/main/java/org/apache/flink/datastream/impl/operators/BaseKeyedProcessOperator.java (1 line): - line 37: // TODO Restore this keySet when task initialized from checkpoint. 
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/IntervalJoinUtil.scala (1 line): - line 474: // TODO support SEMI/ANTI join flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxExecutorImpl.java (1 line): - line 116: // TODO: FLINK-35051 we shouldn't interrupt for every mail, but only for the time sensitive flink-python/pyflink/table/types.py (1 line): - line 1567: # TODO: type cast (such as int -> long) flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/ForStFlinkFileSystem.java (1 line): - line 72: // TODO: make it configurable flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/agg/ImperativeAggCodeGen.scala (1 line): - line 192: // TODO handle accumulate has primitive parameters flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/RichTableSourceQueryOperation.java (1 line): - line 35: * A {@link TableSourceQueryOperation} with {@link FlinkStatistic} and qualifiedName. TODO this flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliStrings.java (1 line): - line 326: // TODO: Remove this after RestClientException supports to get RootCause. 
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/optimize/StreamCommonSubGraphBasedOptimizer.scala (1 line): - line 87: // TODO FLINK-24048: Move changeLog inference out of optimizing phase flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalWindowTableFunction.scala (1 line): - line 51: // TODO set semantic window (such as session window) require other Dam Behavior flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/ExecutionGraphHandler.java (1 line): - line 88: // TODO: Consider to support reporting initialization stats without checkpointing flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/stream/StreamExecDeduplicate.java (1 line): - line 372: // TODO: create async version flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/batch/BatchExecSortLimit.java (1 line): - line 145: // TODO If input is ordered, there is no need to use the heap. 
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryControllerUtils.java (1 line): - line 116: // TODO use strict capacity limit until FLINK-15532 resolved flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java (1 line): - line 229: // TODO: use new metric name, such as "TransferStateDurationMs" flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdUniqueGroups.scala (1 line): - line 317: // TODO drop some nonGroupingCols base on FlinkRelMdColumnUniqueness#areColumnsUnique(window) flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java (1 line): - line 2045: // TODO: Introduce a more general solution to the race condition flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/typeutils/WindowKeySerializer.java (1 line): - line 110: *
TODO so, we can remove this return value for simplifying interface. flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/RexNodeExtractor.scala (1 line): - line 419: // TODO support SqlTrimFunction.Flag