buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/BuildPlugin.groovy (10 lines):
- line 200: // TODO: Should we ditch this in favor of just using the built in exporting configurations? all three artifact types have them now
- line 301: // TODO: May not be needed on all itests
- line 337: // TODO: Remove all root project distribution logic. It should exist in a separate dist project.
- line 400: // TODO: Are these better to be set on just the jar or do these make sense to be on all jars (jar, javadoc, source)?
- line 428: // TODO: Remove when root project does not handle distribution
- line 478: // TODO: Remove when root project does not handle distribution
- line 505: // TODO: Assemble is being configured to make javadoc and sources jars no matter what due to the withX() methods above. Is this even required in that case?
- line 548: // TODO: Is this still required on modern Eclipse versions?
- line 604: // TODO: Should this adhoc component configuration work be done in the SparkVariantPlugin?
- line 816: // TODO: Should this be the case? It is in Elasticsearch, but we may have to update some CI jobs?

buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/fixture/hadoop/HadoopClusterFormationTasks.groovy (4 lines):
- line 308: // TODO: This is a later item for CI stability
- line 317: // TODO: Switch logic if a service is ever not a tar distribution
- line 349: // TODO: Extra Config Files
- line 389: // TODO: Test on Windows

buildSrc/src/main/java/org/elasticsearch/hadoop/gradle/buildtools/DependencyLicensesTask.java (4 lines):
- line 115: // TODO: we should be able to default this to eg compile deps, but we need to move the licenses
- line 237: // TODO: why do we support suffix of LICENSE *and* LICENSE.txt??
- line 338: // TODO: shouldn't have to trim, sha files should not have trailing newline (see the checksum sketch after this file list)
- line 375: // try the other suffix...TODO: get rid of this, just support ending in .txt

mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/CommonsHttpTransport.java (4 lines):
- line 302: // TODO: Limit this by hosts and ports
- line 323: // TODO: Limit this by hosts and ports
- line 377: // TODO: Limit this by hosts and ports
- line 379: // TODO: This should just pass in the user provider instead of getting the user principal at this point.

buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/fixture/hadoop/InstanceInfo.groovy (3 lines):
- line 130: // TODO: make maxwait configurable
- line 180: // TODO: Test on Windows to see if this actually works
- line 273: // TODO: Eventually support Windows

buildSrc/src/main/java/org/elasticsearch/hadoop/gradle/scala/SparkVariantPlugin.java (2 lines):
- line 285: // TODO: address deprecated configuration names
- line 287: // TODO: compile only

buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/fixture/hadoop/tasks/SparkApp.groovy (2 lines):
- line 132: String commandName = 'spark-submit' // TODO: Windows?
- line 184: // TODO: Eventually support standalone or local

buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/buildtools/AntFixture.groovy (2 lines):
- line 156: // TODO: change this to a loop?
- line 188: * TODO: this could be removed if we do use our own ProcessBuilder and pump output from the process

mr/src/main/java/org/elasticsearch/hadoop/handler/impl/elasticsearch/ElasticsearchHandler.java (1 line):
- line 261: // TODO: look at collecting these stats some other way later.
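The DependencyLicensesTask.java checksum TODOs (lines 338 and 375) hint at a simple cleanup: if the task wrote `.sha1` files without a trailing newline, readers could compare contents verbatim instead of trimming. A minimal sketch of that idea, assuming a hypothetical `ShaFiles` helper that is not part of the actual build code:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Hypothetical helper illustrating the TODO at DependencyLicensesTask.java:338.
public final class ShaFiles {
    private ShaFiles() {}

    // Hex-encode the SHA-1 of a dependency jar.
    static String sha1Hex(Path jar) throws IOException, NoSuchAlgorithmException {
        byte[] digest = MessageDigest.getInstance("SHA-1").digest(Files.readAllBytes(jar));
        StringBuilder hex = new StringBuilder(digest.length * 2);
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    // Write the checksum with no trailing newline, so future readers can compare verbatim.
    static void write(Path shaFile, Path jar) throws IOException, NoSuchAlgorithmException {
        Files.write(shaFile, sha1Hex(jar).getBytes(StandardCharsets.UTF_8));
    }

    // Until every existing .sha1 file is rewritten, reads still trim defensively.
    static boolean matches(Path shaFile, Path jar) throws IOException, NoSuchAlgorithmException {
        String expected = new String(Files.readAllBytes(shaFile), StandardCharsets.UTF_8).trim();
        return expected.equals(sha1Hex(jar));
    }
}
```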
mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java (1 line):
- line 412: // TODO: Perhaps open an issue to limit the expansion of a single byte array (for repeated rewrite-retries); see the buffer-growth sketch after this file list

mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java (1 line):
- line 353: // TODO: applyAliasMetaData should be called in order to ensure that the counts are exact (alias filters and routing may change the number of documents)

mr/src/main/java/org/elasticsearch/hadoop/mr/security/EsTokenIdentifier.java (1 line):
- line 136: // TODO: Does not support multiple clusters yet

buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/fixture/hadoop/tasks/HadoopMRJob.groovy (1 line):
- line 150: String commandName = 'yarn' // TODO: or yarn.cmd for Windows

hive/src/main/java/org/elasticsearch/hadoop/hive/EsHiveOutputFormat.java (1 line):
- line 74: // TODO: check whether a proper Reporter can be passed in

spark/sql-30/src/itest/scala/org/elasticsearch/spark/integration/AbstractScalaEsSparkSQL.scala (1 line):
- line 1671: // TODO: Available in 5.6, but we only track major version ids in the connector.

spark/core/src/itest/scala/org/elasticsearch/spark/integration/AbstractScalaEsSpark.scala (1 line):
- line 312: // TODO: Available in 5.6, but we only track major version ids in the connector.

hive/src/main/java/org/elasticsearch/hadoop/hive/EsHiveInputFormat.java (1 line):
- line 68: // TODO: can this be delegated?

mr/src/main/java/org/elasticsearch/hadoop/mr/security/TokenUtil.java (1 line):
- line 68: // TODO: Should we extend this to basic authentication at some point?

buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/IntegrationBuildPlugin.groovy (1 line):
- line 62: // TODO: Swap this out with exposing those jars as artifacts to be consumed in a dist project.

buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/fixture/hadoop/tasks/DfsCopy.groovy (1 line):
- line 130: String commandName = 'hdfs' // TODO: or hdfs.cmd for Windows

mr/src/main/java/org/elasticsearch/hadoop/mr/EsInputFormat.java (1 line):
- line 79: // TODO: can this be computed easily?

hive/src/main/java/org/elasticsearch/hadoop/hive/EsStorageHandler.java (1 line):
- line 84: // TODO: add metahook support

mr/src/main/java/org/elasticsearch/hadoop/security/KeystoreWrapper.java (1 line):
- line 48: // TODO: Eventually support password protected keystores when Elasticsearch does. (see the keystore sketch after this file list)

mr/src/main/java/org/elasticsearch/hadoop/serialization/handler/write/impl/SerializationEventConverter.java (1 line):
- line 35: // TODO: toString doesn't reliably render the record's contents.

buildSrc/src/main/groovy/org/elasticsearch/hadoop/gradle/fixture/ElasticsearchFixturePlugin.groovy (1 line):
- line 101: // TODO: Remove this when this is the default in 7

hive/src/main/java/org/elasticsearch/hadoop/hive/HiveValueWriter.java (1 line):
- line 88: // TODO: handle non-strings

hive/src/main/java/org/elasticsearch/hadoop/hive/HiveBytesConverter.java (1 line):
- line 60: // TODO: add dedicated optimization

mr/src/main/java/org/elasticsearch/hadoop/util/encoding/HttpEncodingTools.java (1 line):
- line 87: // TODO: Potentially fix the plus signs that come out of encoding to be "%20" (see the encoding sketch after this file list)

spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/DefaultSource.scala (1 line):
- line 640: // TODO: Problematic. It's possible that the version is never discovered and set before this is needed.
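On the BulkProcessor.java TODO (line 412), one way the expansion of a retry buffer could be bounded is capped doubling. A sketch under that assumption; the `grow` helper and its parameters are illustrative, not the connector's API:

```java
// Hypothetical sketch for the TODO at BulkProcessor.java:412: cap how far a
// retry buffer may grow instead of letting repeated rewrite-retries double it indefinitely.
public final class BoundedGrowth {
    private BoundedGrowth() {}

    // Grow by doubling, but never beyond maxBytes; all names here are illustrative.
    static byte[] grow(byte[] buffer, int required, int maxBytes) {
        if (required > maxBytes) {
            throw new IllegalStateException("retry buffer would exceed " + maxBytes + " bytes");
        }
        int newSize = Math.max(buffer.length, 1);
        while (newSize < required) {
            newSize = Math.min(newSize * 2, maxBytes);
        }
        byte[] grown = new byte[newSize];
        System.arraycopy(buffer, 0, grown, 0, buffer.length);
        return grown;
    }
}
```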
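For the KeystoreWrapper.java TODO (line 48), password-protected stores are already expressible with the standard `java.security.KeyStore` API; the blocker is parity with Elasticsearch, not the mechanics. A hedged sketch, with a hypothetical `load` helper:

```java
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.KeyStore;

// Hypothetical sketch for the TODO at KeystoreWrapper.java:48: loading a
// password-protected PKCS12 keystore. The password parameter is the assumption here;
// the connector currently assumes an unprotected store.
public final class ProtectedKeystore {
    private ProtectedKeystore() {}

    static KeyStore load(Path path, char[] password) throws Exception {
        KeyStore keystore = KeyStore.getInstance("PKCS12");
        try (InputStream in = Files.newInputStream(path)) {
            keystore.load(in, password); // a null password still works for unprotected stores
        }
        return keystore;
    }
}
```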
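The HttpEncodingTools.java TODO (line 87) exists because `java.net.URLEncoder` targets `application/x-www-form-urlencoded`, where a space becomes `+`, while URI components conventionally use `%20`. A minimal sketch of the post-processing fix the comment suggests (the `encodeForUri` name is hypothetical); note that a literal `+` in the input is already encoded as `%2B`, so the replacement is unambiguous:

```java
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

// Hypothetical illustration of the TODO at HttpEncodingTools.java:87.
public final class SpaceEncoding {
    private SpaceEncoding() {}

    // URLEncoder emits '+' for spaces (form encoding); URI components expect "%20".
    static String encodeForUri(String raw) {
        try {
            return URLEncoder.encode(raw, "UTF-8").replace("+", "%20");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("UTF-8 is always supported", e);
        }
    }

    public static void main(String[] args) {
        System.out.println(encodeForUri("a b+c")); // prints a%20b%2Bc
    }
}
```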
spark/core/src/main/scala/org/elasticsearch/spark/serialization/ReflectionUtils.scala (1 line):
- line 72: // TODO: this is a hack, since we expect the field declaration order to match the source, but there's no guarantee (see the reflection sketch after this file list)

mr/src/main/java/org/elasticsearch/hadoop/util/ecs/HostData.java (1 line):
- line 82: // TODO: Should these privileged blocks be moved further up the call stack?

mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java (1 line):
- line 509: // TODO: This does not compact a list of objects - it is COMPILING it, and changing the structure. Make this more explicit.

hive/src/main/java/org/elasticsearch/hadoop/hive/EsSerDe.java (1 line):
- line 147: // TODO: can compute serialization stats but not deserialization ones
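On the ReflectionUtils.scala TODO (line 72): the Javadoc for `Class#getDeclaredFields` makes no ordering promise, so relying on source declaration order is fragile across compilers and JVMs. A sketch, in Java for consistency with the sketches above, of one way to get a deterministic order; sorting by name is deterministic but is not the source order the current code expects, so this is a direction rather than a drop-in fix:

```java
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.Comparator;

// Hypothetical sketch for the TODO at ReflectionUtils.scala:72. getDeclaredFields()
// returns fields "in no particular order" per its Javadoc; sorting by name at least
// yields the same order on every JVM.
public final class DeterministicFields {
    private DeterministicFields() {}

    static Field[] sortedDeclaredFields(Class<?> type) {
        Field[] fields = type.getDeclaredFields();
        Arrays.sort(fields, Comparator.comparing(Field::getName));
        return fields;
    }
}
```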