api/src/main/java/org/apache/iceberg/expressions/VariantExpressionUtil.java (3 lines):
- line 31: // TODO: Implement PhysicalType.TIME
- line 32: // TODO: Implement PhysicalType.TIMESTAMPNTZ_NANO and PhysicalType.TIMESTAMPTZ_NANO
- line 33: // TODO: Implement PhysicalType.UUID

core/src/main/java/org/apache/iceberg/puffin/PuffinFormat.java (3 lines):
- line 111: // TODO requires LZ4 frame compressor, e.g.
- line 133: // TODO requires LZ4 frame decompressor, e.g.
- line 153: // TODO implement ZstdDecompressor.getDecompressedSize for ByteBuffer to avoid copying

core/src/main/java/org/apache/iceberg/io/BaseTaskWriter.java (2 lines):
- line 161: // TODO attach the previous row if has a positional-delete row schema in appender factory.
- line 190: // TODO attach the previous row if has a positional-delete row schema in appender factory.

parquet/src/main/java/org/apache/iceberg/parquet/ParquetFilters.java (2 lines):
- line 45: // TODO: handle AlwaysFalse.INSTANCE
- line 223: // TODO: this needs to convert to handle BigDecimal and UUID

api/src/main/java/org/apache/iceberg/transforms/Bucket.java (2 lines):
- line 178: // TODO: small ranges can be projected.
- line 193: // TODO: need to translate not(eq(...)) into notEq in expressions

api/src/main/java/org/apache/iceberg/expressions/StrictMetricsEvaluator.java (2 lines):
- line 72: // TODO: detect the case where a column is missing from the file using file's max field id.
- line 470: // TODO: Handle cases that definitely cannot match, such as notStartsWith("x") when the bounds

mr/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java (2 lines):
- line 147: // TODO add a filter parser to get rid of Serialization
- line 304: // TODO: We could give a more accurate progress based on records read from the file.

arrow/src/main/java/org/apache/iceberg/arrow/vectorized/VectorizedArrowReader.java (2 lines):
- line 268: // TODO: Possibly use the uncompressed page size info to set the initial capacity
- line 364: // TODO: Possibly use the uncompressed page size info to set the initial capacity

parquet/src/main/java/org/apache/iceberg/parquet/ParquetIO.java (2 lines):
- line 45: // TODO: use reflection to avoid depending on classes from iceberg-hadoop
- line 46: // TODO: use reflection to avoid depending on classes from hadoop

core/src/main/java/org/apache/iceberg/encryption/EncryptionUtil.java (1 line):
- line 44: // TODO: Add KMS implementations

flink/v2.0/flink/src/main/java/org/apache/iceberg/flink/data/FlinkParquetReaders.java (1 line):
- line 369: // TODO: need a unit test to write-read-validate decimal via FlinkParquetWrite/Reader

spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/ParquetReaderType.java (1 line):
- line 34: * TODO: Implement {@link org.apache.comet.parquet.SupportsComet} in SparkScan to convert Spark
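The PuffinFormat entries above track the lack of an LZ4 *frame* codec. As a rough illustration of the missing piece only, and not of how Iceberg will wire it in, here is a minimal round-trip through the LZ4 frame format using the lz4-java library, which is assumed here purely for demonstration and is not an Iceberg dependency:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import net.jpountz.lz4.LZ4FrameInputStream;
import net.jpountz.lz4.LZ4FrameOutputStream;

public class Lz4FrameRoundTrip {
  public static void main(String[] args) throws IOException {
    byte[] payload = "puffin blob payload".getBytes(StandardCharsets.UTF_8);

    // Compress with the LZ4 frame format (plain block-level LZ4 is not what the TODO asks for).
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (LZ4FrameOutputStream frameOut = new LZ4FrameOutputStream(compressed)) {
      frameOut.write(payload);
    }

    // Decompress the frame back into the original bytes.
    byte[] roundTripped;
    try (LZ4FrameInputStream frameIn =
        new LZ4FrameInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
      roundTripped = frameIn.readAllBytes();
    }

    System.out.println(new String(roundTripped, StandardCharsets.UTF_8)); // puffin blob payload
  }
}
```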

core/src/main/java/org/apache/iceberg/util/SnapshotUtil.java (1 line):
- line 380: // TODO: recover the schema by reading previous metadata files

parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueWriters.java (1 line):
- line 495: // TODO: make sure this definition level is correct

data/src/main/java/org/apache/iceberg/data/DeleteFilter.java (1 line):
- line 294: // TODO: support adding nested columns. this will currently fail when finding nested columns to

parquet/src/main/java/org/apache/iceberg/parquet/ParquetReadSupport.java (1 line):
- line 117: // TODO: this breaks when columns are reordered.

flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/StreamingReaderOperator.java (1 line):
- line 96: // TODO Replace Java serialization with Avro approach to keep state compatibility.

flink/v1.20/flink/src/main/java/org/apache/iceberg/flink/data/FlinkPlannedAvroReader.java (1 line):
- line 103: // TODO: should this pass expected so that struct.get can reuse containers?

orc/src/main/java/org/apache/iceberg/orc/OrcFileAppender.java (1 line):
- line 57: @SuppressWarnings("unused") // Currently used in tests TODO remove this redundant field

core/src/main/java/org/apache/iceberg/io/SortedPosDeleteWriter.java (1 line):
- line 109: // TODO Flush buffer based on the policy that checking whether whole heap memory size exceed the

core/src/main/java/org/apache/iceberg/rest/RESTTableOperations.java (1 line):
- line 157: // TODO: ensure that the HTTP client lib passes HTTP client errors to the error handler

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/procedures/RewriteDataFilesProcedure.java (1 line):
- line 153: // TODO: we need to allow this in future when SparkAction has handling for this.

spark/v3.5/spark-runtime/src/integration/java/org/apache/iceberg/spark/SmokeTest.java (1 line):
- line 41: // TODO Update doc example so that it can actually be run, modifications were required for this

flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/data/FlinkParquetReaders.java (1 line):
- line 369: // TODO: need a unit test to write-read-validate decimal via FlinkParquetWrite/Reader

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/data/vectorized/VectorizedSparkOrcReaders.java (1 line):
- line 356: // TODO: Is it okay to assume that (precision,scale) parameters == (precision,scale) of the

spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkValueWriters.java (1 line):
- line 104: // TODO: direct conversion from string to byte buffer

parquet/src/main/java/org/apache/iceberg/parquet/PageIterator.java (1 line):
- line 244: // TODO: May want to change this so that this class is not dictionary-aware.

flink/v2.0/flink/src/main/java/org/apache/iceberg/flink/source/IcebergTableSource.java (1 line):
- line 194: // TODO: support nested projection

core/src/main/java/org/apache/iceberg/variants/PrimitiveWrapper.java (1 line):
- line 211: // TODO: use short string when possible

spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkMicroBatchStream.java (1 line):
- line 371: // TODO : use readLimit provided in function param, the readLimits are derived from

flink/v1.20/flink/src/main/java/org/apache/iceberg/flink/source/StreamingReaderOperator.java (1 line):
- line 96: // TODO Replace Java serialization with Avro approach to keep state compatibility.
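The SparkValueWriters entry above ("direct conversion from string to byte buffer") refers to avoiding an intermediate java.lang.String when writing Spark's UTF8String values. A hedged sketch of the idea, not the project's planned fix, since UTF8String already carries UTF-8 encoded bytes:

```java
import java.nio.ByteBuffer;
import org.apache.spark.unsafe.types.UTF8String;

public class Utf8StringToByteBuffer {
  // Illustration only: wrap the UTF-8 bytes UTF8String already holds,
  // instead of round-tripping through utf8.toString() and re-encoding to UTF-8.
  static ByteBuffer toByteBuffer(UTF8String utf8) {
    return ByteBuffer.wrap(utf8.getBytes());
  }

  public static void main(String[] args) {
    ByteBuffer buffer = toByteBuffer(UTF8String.fromString("iceberg"));
    System.out.println(buffer.remaining()); // 7 bytes, no intermediate String created here
  }
}
```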
spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/procedures/RewriteDataFilesProcedure.java (1 line):
- line 153: // TODO: we need to allow this in future when SparkAction has handling for this.

core/src/main/java/org/apache/iceberg/rest/RESTSessionCatalog.java (1 line):
- line 616: // TODO: rename to LoadNamespaceResponse?

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/data/SparkValueWriters.java (1 line):
- line 104: // TODO: direct conversion from string to byte buffer

flink/v1.20/flink/src/main/java/org/apache/iceberg/flink/FlinkCatalog.java (1 line):
- line 667: // TODO modify this after Flink support partition transform.

spark/v3.4/spark-extensions/src/main/scala/org/apache/spark/sql/catalyst/analysis/RewriteUpdateTable.scala (1 line):
- line 198: // TODO: avoid executing the condition for each column

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/SnapshotTableSparkAction.java (1 line):
- line 127: // TODO: Check the dest table location does not overlap with the source table location

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java (1 line):
- line 281: // TODO: enable aggregate push down for partition col group by expression

flink/v2.0/flink/src/main/java/org/apache/iceberg/flink/sink/IcebergSink.java (1 line):
- line 220: // TODO Support small file compaction

flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/IcebergSink.java (1 line):
- line 219: // TODO Support small file compaction

spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/vectorized/VectorizedSparkOrcReaders.java (1 line):
- line 356: // TODO: Is it okay to assume that (precision,scale) parameters == (precision,scale) of the

core/src/main/java/org/apache/iceberg/RewriteTablePathUtil.java (1 line):
- line 132: // TODO: update partition statistics file paths

core/src/main/java/org/apache/iceberg/SchemaUpdate.java (1 line):
- line 645: // TODO: What happens if there are no fields left?

spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java (1 line):
- line 281: // TODO: enable aggregate push down for partition col group by expression

flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/IcebergTableSource.java (1 line):
- line 194: // TODO: support nested projection

flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/FlinkCatalog.java (1 line):
- line 667: // TODO modify this after Flink support partition transform.

spark/v3.4/spark-runtime/src/integration/java/org/apache/iceberg/spark/SmokeTest.java (1 line):
- line 41: // TODO Update doc example so that it can actually be run, modifications were required for this

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/data/SparkPlannedAvroReader.java (1 line):
- line 104: // TODO: should this pass expected so that struct.get can reuse containers?
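For the SnapshotTableSparkAction entry ("Check the dest table location does not overlap with the source table location"), the check itself is plain path-prefix logic. Below is a self-contained sketch of one possible validation; the overlaps() helper is hypothetical and not part of the Iceberg API:

```java
public class LocationOverlapCheck {
  // Hypothetical helper: true if either location is a prefix of the other once a
  // trailing slash is enforced, e.g. s3://bucket/db/tbl vs s3://bucket/db/tbl/snap.
  static boolean overlaps(String sourceLocation, String destLocation) {
    String src = withTrailingSlash(sourceLocation);
    String dst = withTrailingSlash(destLocation);
    return src.startsWith(dst) || dst.startsWith(src);
  }

  private static String withTrailingSlash(String location) {
    return location.endsWith("/") ? location : location + "/";
  }

  public static void main(String[] args) {
    System.out.println(overlaps("s3://bucket/db/tbl", "s3://bucket/db/tbl/snap")); // true
    System.out.println(overlaps("s3://bucket/db/tbl", "s3://bucket/db/tbl_copy")); // false
  }
}
```

Normalizing the trailing slash is the interesting detail: without it, "tbl_copy" would incorrectly count as overlapping "tbl" because of a plain string-prefix match.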
aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3FileIO.java (1 line):
- line 281: * configurations it still just returns a general bucket TODO Update to use S3Mock when it behaves

core/src/main/java/org/apache/iceberg/puffin/PuffinReader.java (1 line):
- line 128: // TODO inspect blob offsets and coalesce read regions close to each other

flink/v1.20/flink/src/main/java/org/apache/iceberg/flink/data/FlinkParquetReaders.java (1 line):
- line 369: // TODO: need a unit test to write-read-validate decimal via FlinkParquetWrite/Reader

spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SnapshotTableSparkAction.java (1 line):
- line 127: // TODO: Check the dest table location does not overlap with the source table location

flink/v2.0/flink/src/main/java/org/apache/iceberg/flink/source/StreamingReaderOperator.java (1 line):
- line 100: // TODO Replace Java serialization with Avro approach to keep state compatibility.

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/ParquetReaderType.java (1 line):
- line 34: * TODO: Implement {@link org.apache.comet.parquet.SupportsComet} in SparkScan to convert Spark

flink/v2.0/flink/src/main/java/org/apache/iceberg/flink/data/FlinkPlannedAvroReader.java (1 line):
- line 103: // TODO: should this pass expected so that struct.get can reuse containers?

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/StagedSparkTable.java (1 line):
- line 39: // TODO: clean up

core/src/main/java/org/apache/iceberg/avro/ValueWriter.java (1 line):
- line 30: return Stream.empty(); // TODO will populate in following PRs

spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkPlannedAvroReader.java (1 line):
- line 104: // TODO: should this pass expected so that struct.get can reuse containers?

flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/data/FlinkPlannedAvroReader.java (1 line):
- line 103: // TODO: should this pass expected so that struct.get can reuse containers?

flink/v1.20/flink/src/main/java/org/apache/iceberg/flink/sink/IcebergSink.java (1 line):
- line 219: // TODO Support small file compaction

api/src/main/java/org/apache/iceberg/encryption/EncryptingFileIO.java (1 line):
- line 116: // TODO: is the length correct for the encrypted file? It may be the length of the plaintext

parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java (1 line):
- line 1361: // TODO: should not need to get the schema to push down before opening the file.

kafka-connect/kafka-connect-transforms/src/main/java/org/debezium/connector/mongodb/transforms/MongoDataConverter.java (1 line):
- line 213: // TODO FIX Cyclomatic Complexity is 30 (max allowed is 12). [CyclomaticComplexity]

api/src/main/java/org/apache/iceberg/transforms/Truncate.java (1 line):
- line 211: // TODO: for integers, can this return the original predicate?

spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkMicroBatchStream.java (1 line):
- line 371: // TODO : use readLimit provided in function param, the readLimits are derived from

spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/StagedSparkTable.java (1 line):
- line 39: // TODO: clean up

api/src/main/java/org/apache/iceberg/expressions/InclusiveMetricsEvaluator.java (1 line):
- line 76: // TODO: detect the case where a column is missing from the file using file's max field id.

hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java (1 line):
- line 50: * TODO we should be able to extract some more commonalities to BaseMetastoreTableOperations to

flink/v1.20/flink/src/main/java/org/apache/iceberg/flink/source/IcebergTableSource.java (1 line):
- line 194: // TODO: support nested projection

flink/v2.0/flink/src/main/java/org/apache/iceberg/flink/FlinkCatalog.java (1 line):
- line 666: // TODO modify this after Flink support partition transform.

api/src/main/java/org/apache/iceberg/expressions/UnboundPredicate.java (1 line):
- line 202: // TODO: translate truncate(col) == value to startsWith(value)

parquet/src/main/java/org/apache/iceberg/parquet/TripleWriter.java (1 line):
- line 24: // TODO: should definition level be included, or should it be part of the column?

core/src/main/java/org/apache/iceberg/avro/AvroMetrics.java (1 line):
- line 35: // TODO will populate in following PRs if datum writer is a MetricsAwareDatumWriter
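The PuffinReader entry ("inspect blob offsets and coalesce read regions close to each other") describes a classic range-merging optimization. Below is a standalone sketch of the idea under the assumption that blobs are modeled as (offset, length) pairs; the Region type and the gap threshold are illustrative, not Iceberg classes:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ReadRegionCoalescer {
  // Hypothetical byte range [offset, offset + length) within a Puffin file.
  record Region(long offset, long length) {
    long end() { return offset + length; }
  }

  // Merge regions whose gap is at most maxGap bytes, so nearby blobs can be fetched
  // with one range request instead of many small ones.
  static List<Region> coalesce(List<Region> regions, long maxGap) {
    List<Region> sorted = new ArrayList<>(regions);
    sorted.sort(Comparator.comparingLong(Region::offset));

    List<Region> merged = new ArrayList<>();
    for (Region region : sorted) {
      if (!merged.isEmpty() && region.offset() - merged.get(merged.size() - 1).end() <= maxGap) {
        Region last = merged.remove(merged.size() - 1);
        long end = Math.max(last.end(), region.end());
        merged.add(new Region(last.offset(), end - last.offset()));
      } else {
        merged.add(region);
      }
    }
    return merged;
  }

  public static void main(String[] args) {
    List<Region> blobs = List.of(new Region(0, 100), new Region(120, 50), new Region(10_000, 10));
    System.out.println(coalesce(blobs, 64)); // first two merge; the distant blob stays separate
  }
}
```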
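The UnboundPredicate entry ("translate truncate(col) == value to startsWith(value)") rests on the observation that, for string columns, equality against a width-length truncated value selects exactly the rows whose value starts with that prefix. A small sketch of the target form using the public expressions API; the rewrite step itself is hypothetical and not what UnboundPredicate currently does:

```java
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;

public class TruncateEqualsToStartsWith {
  // Illustration: truncate(col, width) == prefix holds for a string row exactly when
  // the row's col value starts with prefix (where prefix has length == width), so the
  // predicate could be rewritten to the existing startsWith form.
  static Expression rewrite(String column, String truncatedValue) {
    return Expressions.startsWith(column, truncatedValue);
  }

  public static void main(String[] args) {
    // truncate(data, 4) == "iceb"  ~>  startsWith(data, "iceb")
    System.out.println(rewrite("data", "iceb"));
  }
}
```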