streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskManager.java (6 lines):
- line 272: // TODO: this step should be removed as we complete migrating to state updater
- line 426: // TODO: move task-corrupted and task-migrated out of the public errors package since they are internal errors and always be
- line 1521: // TODO: change type to `StreamTask`
- line 1661: // TODO: should record the task ids when handling this exception
- line 1772: * TODO: after we complete switching to state updater, we could rename this function as allRunningTasks
- line 2065: // TODO KAFKA-12648: for now just swallow the exception to avoid interfering with the other topologies

connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java (4 lines):
- line 483: // TODO: Some of these can be performed concurrently or even optimized away entirely.
- line 574: } catch (WakeupException e) { // FIXME should not be WakeupException
- line 2457: // TODO: As an optimization, some task config updates could avoid a rebalance. In particular, single-task
- line 2703: // TODO: We need to at least commit task offsets, but if we could commit offsets & pause them instead of

streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreChangelogReader.java (4 lines):
- line 461: // TODO: we always try to restore as a batch when some records are accumulated, which may result in
- line 497: // TODO (?) If we cannot fetch records during restore, should we trigger `task.timeout.ms` ?
- line 498: // TODO (?) If we cannot fetch records for standby task, should we trigger `task.timeout.ms` ?
- line 846: // TODO K9113: when TaskType.GLOBAL is added we need to modify this

connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java (4 lines):
- line 77: // TODO: This should not be so long. However, due to potentially long rebalances that may have to wait a full
- line 194: // TODO: do we need this?
- line 246: // TODO: we need to check if these listeners are same as 'listeners'
- line 247: // TODO: the following code assumes that they are different

streams/src/main/java/org/apache/kafka/streams/processor/internals/Tasks.java (3 lines):
- line 49: // TODO: convert to Stream/StandbyTask when we remove TaskManager#StateMachineTask with mocks
- line 63: // TODO: convert to Stream/StandbyTask when we remove TaskManager#StateMachineTask with mocks
- line 267: // TODO: change return type to `StreamTask`

streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskExecutionMetadata.java (3 lines):
- line 38: // TODO: implement exponential backoff, for now we just wait 5s
- line 67: // TODO implement error handling/backoff for non-named topologies (needs KIP)
- line 123: // TODO: during long task backoffs, pause the full topology to avoid it getting out of sync

streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImpl.java (3 lines):
- line 183: // TODO (KAFKA-12887): we should not trigger user's exception handler for illegal-argument but always
- line 293: // TODO with https://issues.apache.org/jira/browse/KAFKA-10315 we can just call
- line 385: // TODO with https://issues.apache.org/jira/browse/KAFKA-10315 we can just call

streams/src/main/java/org/apache/kafka/streams/state/internals/CachedStateStore.java (2 lines):
- line 32: * TODO: this is a hacky workaround for now, should be removed when we decouple caching with emitting
- line 42: * TODO: this is a hacky workaround for now, should be removed when we decouple caching with emitting

streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskExecutor.java (2 lines):
- line 99: // TODO: enable regardless of whether using named topologies
- line 105: // TODO consolidate TimeoutException retries with general error handling

release/release.py (2 lines):
- line 361: # TODO: Many of these suggested validation steps could be automated
- line 366: # TODO: Can we close the staging repository via a REST API since we

clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java (2 lines):
- line 246: // TODO: remove as a part of KAFKA-12410
- line 254: // TODO: remove as a part of KAFKA-12410

streams/src/main/java/org/apache/kafka/streams/processor/internals/StandbyTaskCreator.java (2 lines):
- line 74: // TODO: convert to StandbyTask when we remove TaskManager#StateMachineTask with mocks
- line 116: * TODO: we pass in the new input partitions to validate if they still match,

core/src/main/scala/kafka/server/ReplicaManager.scala (2 lines):
- line 1194: * TODO: fix this by implementing topic IDs. */
- line 2300: * TODO: the above may need to be fixed later

clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java (2 lines):
- line 125: // TODO write uncompressed content size, update flg.validate()
- line 184: // TODO implement content checksum, update flg.validate()

core/src/main/scala/kafka/server/KafkaApis.scala (2 lines):
- line 1680: // TODO: The current append API makes doing separate writes per producerId a little easier, but it would
- line 3974: // TODO: remove resolvedResponseData method when sizeOf can take a data object.

streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java (2 lines):
- line 195: // TODO KAFKA-13283: once we enforce all configs be passed in when constructing the topology builder then we can set
- line 332: // TODO: we should lift this requirement in the future

connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java (2 lines):
- line 546: // TODO: To get any real benefit from the backing store abstraction, we should move some of
- line 559: // TODO: move connector configuration update handling here to be consistent with

streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java (2 lines):
- line 143: // TODO: convert to StreamTask when we remove TaskManager#StateMachineTask with mocks
- line 202: * TODO: we pass in the new input partitions to validate if they still match,

streams/src/main/java/org/apache/kafka/streams/processor/internals/tasks/DefaultTaskExecutor.java (2 lines):
- line 158: // TODO: enable regardless of whether using named topologies
- line 164: // TODO consolidate TimeoutException retries with general error handling

generator/src/main/java/org/apache/kafka/message/checker/Unifier.java (1 line):
- line 200: // TODO: we should probably validate the versions, etc. settings of the common struct.

streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java (1 line):
- line 444: // TODO: this should be removed after we decouple caching with emitting

streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java (1 line):
- line 187: // TODO: currently we cannot get the full topic configurations and hence cannot allow topic configs without the prefix,

core/src/main/scala/kafka/server/DynamicBrokerConfig.scala (1 line):
- line 329: // TODO: addBrokerReconfigurable(new DynamicListenerConfig(controller))

streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingListValueBytesStore.java (1 line):
- line 49: // TODO: here we always return null so that deser would not fail.

release/templates.py (1 line):
- line 173: * Build a sample against jars in the staging repo: (TODO: Can we get a temporary URL before "closing" the staged artifacts?)

core/src/main/scala/kafka/server/AbstractFetcherThread.scala (1 line):
- line 780: // TODO: use fetchTierStateMachine.maybeAdvanceState when implementing async tiering logic in KAFKA-13560

core/src/main/java/kafka/server/share/SharePartition.java (1 line):
- line 1672: // TODO: Maybe we can club the continuous offsets here.

connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java (1 line):
- line 76: // TODO: improve once plugins are allowed to be added/removed during runtime.

streams/src/main/java/org/apache/kafka/streams/state/internals/MemoryLRUCache.java (1 line):
- line 52: private boolean restoring = false; // TODO: this is a sub-optimal solution to avoid logging during restoration.

streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorStateManager.java (1 line):
- line 376: // TODO (KAFKA-12887): we should not trigger user's exception handler for illegal-argument but always

shell/src/main/java/org/apache/kafka/shell/glob/GlobComponent.java (1 line):
- line 129: // TODO: handle character ranges

core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala (1 line):
- line 315: // log.cleaner.enable is true. TODO: improve this (see KAFKA-13610)

connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java (1 line):
- line 417: // TODO: This control flow is awkward. Push task config generation into WorkerConnector class?

streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignor.java (1 line):
- line 1162: // TODO: once KAFKA-10078 is resolved we can leave it to the client to trigger this rebalance

raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java (1 line):
- line 1579: // FIXME: `completionTimeMs`, which can be null

streams/src/main/java/org/apache/kafka/streams/processor/internals/Task.java (1 line):
- line 62: * +---> | Suspended (3) | ----+ | //TODO Suspended(3) could be removed after we've stable on KIP-429

connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncStore.java (1 line):
- line 183: // TODO: consider batching updates so that this copy can be performed less often for high-volume sync topics.

streams/src/main/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdater.java (1 line):
- line 348: // TODO: we can let the exception encode the actual corrupted changelog partitions and only

storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataCache.java (1 line):
- line 104: // TODO We are not clearing the entry for epoch when RemoteLogLeaderEpochState becomes empty. This will be addressed

connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java (1 line):
- line 284: // TODO: retry more gracefully and not forever

core/src/main/scala/kafka/cluster/Partition.scala (1 line):
- line 317: @volatile private var _topicId: Option[Uuid] = None // TODO: merge topicPartition and _topicId into TopicIdPartition once TopicId persist in most of the code by KAFKA-16212

streams/src/main/java/org/apache/kafka/streams/processor/internals/StateUpdater.java (1 line):
- line 224: // TODO: We would still return true if all active tasks to be restored

streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregate.java (1 line):
- line 221: // TODO: consolidate SessionWindow with TimeWindow to merge common functions

storage/src/main/java/org/apache/kafka/storage/internals/log/LogOffsetMetadata.java (1 line):
- line 29: //TODO KAFKA-14484 remove once UnifiedLog has been moved to the storage module

connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java (1 line):
- line 181: // FIXME Kafka needs to add a timeout parameter here for us to properly obey the timeout

streams/src/main/java/org/apache/kafka/streams/TopologyConfig.java (1 line):
- line 316: // TODO KAFKA-13283: eventually we should always have the topology props override the global ones regardless

streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollector.java (1 line):
- line 83: // TODO: after we have done KAFKA-9088 we should just add this function

streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java (1 line):
- line 169: // TODO: remove this when we do a topology-incompatible release

streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java (1 line):
- line 84: // TODO: remove this when we do a topology-incompatible release

connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java (1 line):
- line 407: // TODO: make it independent

streams/src/main/java/org/apache/kafka/streams/processor/internals/StateManager.java (1 line):
- line 58: // TODO: we can remove this when consolidating global state manager into processor state manager

clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java (1 line):
- line 617: * TODO: this is a hack and not something we want to support long-term unless we push regex into the protocol

streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/StreamsMetricsImpl.java (1 line):
- line 513: // TODO: In future, we could use thread local maps since each thread will exclusively access the set of keys

metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java (1 line):
- line 129: // TODO: this is O(N). We should add indexing here to speed it up. See KAFKA-13022.

clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java (1 line):
- line 173: in.getInt(); // TODO: verify this content checksum

trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/NodeManager.java (1 line):
- line 201: // TODO: eventually think about putting tasks into a bad state as a result of

core/src/main/scala/kafka/server/BrokerServer.scala (1 line):
- line 338: * TODO: move this action queue to handle thread so we can simplify concurrency handling

connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java (1 line):
- line 61: // TODO: Currently we only support top-level field casting. Ideally we could use a dotted notation in the spec to

clients/src/main/java/org/apache/kafka/common/serialization/Serdes.java (1 line):
- line 202: // TODO: we can also serializes objects of type T using generic Java serialization by default

clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java (1 line):
- line 1135: // TODO: Optimization question: Do we need to retry all partitions upon a single partition error?

streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java (1 line):
- line 84: // TODO: remove this when we do a topology-incompatible release

streams/src/main/java/org/apache/kafka/streams/kstream/internals/CogroupedStreamAggregateBuilder.java (1 line):
- line 187: // TODO: We do not have other emit policies for co-group yet

streams/src/main/java/org/apache/kafka/streams/state/internals/ListValueStore.java (1 line):
- line 75: // TODO: here we always return null so that deser would not fail.

clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchRequestManager.java (1 line):
- line 119: // TODO: move the logic to poll to handle signal close

connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java (1 line):
- line 173: // TODO: share common implementation to enforce this relationship

streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java (1 line):
- line 407: // TODO we only merge source nodes if the subscribed topic(s) are an exact match, so it's still not

streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNode.java (1 line):
- line 112: // TODO: rewrite this part to use Topology.addReadOnlyStateStore() instead

streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java (1 line):
- line 1171: // TODO: we should record the restore latency and its relative time spent ratio after
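The listing above is a point-in-time snapshot, so the line numbers and per-file counts drift as the code changes. A minimal sketch of how such an inventory could be regenerated is shown below; it is illustrative only (not part of the Kafka tooling) and assumes it is run from the root of a git checkout with `git` on the PATH.

    #!/usr/bin/env python3
    # Illustrative sketch (not part of the Kafka repo): rebuild a per-file
    # TODO/FIXME inventory in the format used above.
    import subprocess
    from collections import defaultdict

    # `git grep -I -n -E` prints matches as "<path>:<line>:<text>" and skips binary files.
    out = subprocess.run(
        ["git", "grep", "-I", "-n", "-E", r"TODO|FIXME"],
        capture_output=True, text=True, check=True,
    ).stdout

    hits = defaultdict(list)
    for match in out.splitlines():
        path, line, text = match.split(":", 2)
        hits[path].append((int(line), text.strip()))

    # Files with the most open items first, mirroring the ordering of the report.
    for path, entries in sorted(hits.items(), key=lambda kv: -len(kv[1])):
        print(f"{path} ({len(entries)} lines):")
        for line, text in sorted(entries):
            print(f"- line {line}: {text}")
        print()

Sorting by match count keeps the busiest files (TaskManager.java, DistributedHerder.java, StoreChangelogReader.java, RestServer.java) at the top, which matches the ordering used in the listing.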