tensorflow/tensorflow/stream_executor/rocm/rocm_dnn.cc (9 lines):
- line 528: // ROCM TODO: retrieve MIOpen version with its API
- line 1616: // FIXME: Check if MIOpen can switch dynamically change accumulator type
- line 2204: // TODO: remove this when MIOpen is ready.
- line 2342: // ROCM TODO: cell_size is used to decide hidden size when output project
- line 2344: // ROCM TODO: batch_size is used in dynamic persistent RNN algorithm and is
- line 3007: // ROCM TODO: refactor cc_major / cc_minor
- line 3037: // ROCM TODO: refactor cc_major / cc_minor
- line 3052: // ROCM TODO: refactor cc_major / cc_minor
- line 3300: // ROCM TODO implement this operation
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/FindwxWidgets.cmake (5 lines):
- line 122: # FIXME: check this and provide a correct sample usage...
- line 170: # FIXME: This and all the DBG_MSG calls should be removed after the
- line 276: # FIXME: What if both regex libs are available. regex should be
- line 958: # FIXME: Document that the input variables will be cleared.
- line 990: # FIXME: Add documentation here...
tensorflow/tensorflow/contrib/mpi_collectives/kernels/mpi_ops.cc (5 lines):
- line 572: // TODO: Use the global mpi_global state variable instead of a local one
- line 577: // TODO: Ensure that this is operating correctly. The background thread
- line 624: // TODO: MOVE MESSAGE TABLE INITIALIZATION TO LIBRARY LOAD!
- line 634: // TODO: Eliminate the need for thread sleep by making all activity
- line 811: // TODO: Change this to a Tensorflow thread
tensorflow/tensorflow/contrib/mpi_collectives/mpi_ops.cc (5 lines):
- line 573: // TODO: Use the global mpi_global state variable instead of a local one
- line 578: // TODO: Ensure that this is operating correctly. The background thread
- line 625: // TODO: MOVE MESSAGE TABLE INITIALIZATION TO LIBRARY LOAD!
- line 635: // TODO: Eliminate the need for thread sleep by making all activity
- line 812: // TODO: Change this to a Tensorflow thread
tensorflow/tensorflow/c/c_api_experimental.h (5 lines):
- line 61: // TODO: Migrate to TF_CreateConfig() below.
- line 148: // TODO: remove this API in favor of the next one.
- line 160: // TODO: Remove this function once we migrate away from using session.
- line 164: // TODO: Retire this API in favor of the next one.
- line 181: // TODO: consider folding the 2 APIs below into the ones above.
tensorflow/tensorflow/compiler/mlir/lite/ir/tfl_ops.cc (5 lines):
- line 88: // TODO: Need to handle overflow/underflow cases.
- line 109: // TODO: support the general broadcast behavior.
- line 164: // TODO: support the general broadcast behavior.
- line 205: // TODO: support other attribute kinds
- line 241: /// TODO: Extend this function to handle integral tensor for ops like
tensorflow/tensorflow/stream_executor/rocm/rocm_blas.cc (4 lines):
- line 1702: // ROCM TODO: properly implement the interface
- line 1712: // ROCM TODO: properly implement the interface
- line 1723: // ROCM TODO: properly implement the interface
- line 1729: // ROCM TODO: properly implement the interface
tensorflow/tensorflow/lite/kernels/detection_postprocess.cc (4 lines):
- line 182: // TODO (chowdhery): Make it a scalar when available
- line 251: // TODO (chowdhery): check float
- line 258: // TODO (chowdhery): check float
- line 406: // TODO (chowdhery): Remove the dynamic allocation and replace it
tensorflow/tensorflow/core/kernels/mkl_fused_batch_norm_op.cc (4 lines):
- line 309: // TODO: reserved_space_3: temp mem to hold
- line 324: // TODO: type for weights?
- line 837: // TODO: This parameter functionality is not implemented on CPU.
- line 1216: // TODO: FusedBatchNormV3 has an additional output that is used to
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/Compiler/GNU-CXX-FeatureTests.cmake (4 lines):
- line 89: # TODO: If features are ever recorded for GNU 4.3, there should possibly
- line 96: # TODO: Should be supported by GNU 4.3
- line 104: # TODO: Should be supported since GNU 3.4?
- line 106: # TODO: Should be supported forever?
tensorflow/tensorflow/core/util/gpu_device_functions.h (4 lines):
- line 135: // ROCM TODO add ROCM implementation
- line 417: // ROCM TODO: check if HIP should be changed to cope with more types
- line 549: // FIXME: remove the workaround below once bug is fixed.
- line 643: // ROCM TODO support GpuAtomicAdd for std::complex<>
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/CPackCygwin.cmake (3 lines):
- line 16: # The Cygwin patch number. FIXME: This documentation is incomplete.
- line 20: # The Cygwin patch file. FIXME: This documentation is incomplete.
- line 24: # The Cygwin build script. FIXME: This documentation is incomplete.
tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_cfg.cc (3 lines):
- line 51: // TODO: Right now we just handle zero-D tensors of boolean values.
- line 52: // FIXME: This is almost all wrong, but is a placeholder to unblock the one
- line 303: // TODO: Use PatternRewriter to eliminate these function control flow ops.
tensorflow/tensorflow/core/util/gpu_launch_config.h (3 lines):
- line 171: // ROCM TODO re-enable this after hipOccupancyMaxPotentialBlockSize is
- line 219: // ROCM TODO re-enable this after hipOccupancyMaxActiveBlocksPerMultiprocessor
- line 331: // ROCM TODO re-enable this after hipOccupancyMaxPotentialBlockSize is
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/ExternalProject.cmake (3 lines):
- line 1669: # TODO: Perhaps file:// should be copied to download dir before extraction.
- line 1872: # TODO: Should download and extraction be different steps?
- line 2132: # TODO: Make sure external projects use the proper compiler
tensorflow/tensorflow/contrib/hadoop/kernels/hadoop_dataset_ops.cc (3 lines):
- line 45: // TODO (yongtang): Add more class name support.
- line 62: // TODO (yongtang): Add compression support.
- line 111: // TODO (yongtang): Expand supported format.
tensorflow/tensorflow/stream_executor/cuda/cuda_dnn.cc (3 lines):
- line 1127: // TODO: allow the user to choose an algorithm.
- line 1189: // TODO: For now, we only use cudnnRNN**Ex API to process padded inputs.
- line 3153: // After the TODO below is fixed, users should almost always use fp32 compute
tensorflow/tensorflow/c/c_api_experimental.cc (3 lines):
- line 336: // TODO: retrieve the device string via TFE_ContextListDevices()
- line 346: // TODO: use NAMED_TENSOR_QUEUE_CAPACITY in S4TF compiler.
- line 354: // TODO: consider making this an unknown shape.
tensorflow/tensorflow/core/util/mkl_util.h (3 lines):
- line 1553: /// TODO: this is a faster path with reorder primitive cache compared with
- line 1695: /// TODO: this is a faster path with reorder primitive cache compared with
- line 1762: /// TODO: this is a faster path with reorder primitive cache compared with
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/Compiler/Clang-CXX-FeatureTests.cmake (3 lines):
- line 18: # TODO: Should be supported by Clang 3.1
- line 22: # TODO: Should be supported by Clang 2.9
- line 32: # TODO: Should be supported forever?
tensorflow/tensorflow/core/kernels/svd_op_gpu.cu.cc (3 lines):
- line 193: // TODO: what is with complex values?
- line 238: // TODO: can the two cases (MgeqN and MlessN) be simplified,
- line 365: // TODO: add support for complex types
tensorflow/tensorflow/contrib/learn/python/learn/datasets/synthetic.py (2 lines):
- line 57: TODO:
- line 132: TODO:
tensorflow/tensorflow/contrib/verbs/rdma_mgr.cc (2 lines):
- line 40: // TODO: use WorkerSessionForSession
- line 286: // TODO: This is to fix the 'invalid use of member in static member function
tensorflow/tensorflow/core/platform/cloud/gcs_file_system.cc (2 lines):
- line 132: // TODO: DO NOT use a hardcoded path
- line 423: // TODO: to make it safer, outfile_ should be constructed from an FD
tensorflow/tensorflow/core/kernels/cwise_op_div.cc (2 lines):
- line 31: // ROCM TODO: re-enable complex64 / complex128 after compiler fix
- line 41: // ROCM TODO: re-enable complex64 / complex128 after compiler fix
tensorflow/tensorflow/core/kernels/cuda_sparse.cc (2 lines):
- line 44: // TODO: reuse with cuda_solvers
- line 173: // TODO: reuse with cuda_solvers
tensorflow/tensorflow/core/kernels/pooling_ops_common.h (2 lines):
- line 76: // TODO (yongtang): Remove MaxPoolingOp and use MaxPoolingV2Op,
- line 291: // ROCm TODO: add support __vmaxs4 on ROCm
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/FindwxWindows.cmake (2 lines):
- line 324: ## TODO: Really search for each lib, then decide for
- line 414: ## opengl/glu: TODO/FIXME: better use FindOpenGL.cmake here
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/FindBoost.cmake (2 lines):
- line 289: # FIXME: This probably should be set for both cases
- line 505: # TODO at least Boost_DEBUG here?
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/CPackNSIS.cmake (2 lines):
- line 142: #FIXME we should put NSIS specific code here
- line 143: #FIXME but I'm not doing it because I'm not able to test it...
tensorflow/tensorflow/core/kernels/mkl_input_conversion_op.cc (2 lines):
- line 121: // TODO: For now, input0 is converted and input1 is unchanged
- line 171: // TODO: Cleanup op_data_type and has_avx512f_ after these two parameters
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/Platform/CYGWIN-GNU.cmake (2 lines):
- line 21: # TODO: Is -Wl,--enable-auto-import now always default?
- line 52: # TODO: Is -Wl,--enable-auto-import now always default?
tensorflow/tensorflow/core/user_ops/huge_const_op/npy.h (2 lines):
- line 83: // TODO: implement as constexpr
- line 641: // TODO: implement != and == for dtype_t
tensorflow/tensorflow/core/lib/bfloat16/bfloat16.h (1 line):
- line 180: // TODO: There is a slightly faster implementation (8% faster on CPU)
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/Compiler/Intel-C-FeatureTests.cmake (1 line):
- line 6: # FIXME: Intel C feature detection works only when simulating the GNU compiler.
tensorflow/tensorflow/core/kernels/cwise_op_gpu_div.cu.cc (1 line):
- line 27: // ROCM TODO: fix compiler error for complex64 / complex128 division
tensorflow/tensorflow/core/framework/tensor.h (1 line):
- line 682: // TODO: Remove this when we have a better story for detecting
tensorflow/tensorflow/python/distribute/cross_device_ops.py (1 line):
- line 88: # TODO:(b/138823479): handle the tensor value properly.
tensorflow/tensorflow/core/kernels/sparse_tensors_map_ops.cc (1 line):
- line 437: // TODO: copy shape from TensorShape to &expanded_shape_t(1)
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/GenerateExportHeader.cmake (1 line):
- line 197: # TODO: Install this macro separately?
tensorflow/tensorflow/core/kernels/cwise_op_select.cc (1 line):
- line 161: // TODO (yongtang): Consolidate into n-ary broadcast, instead of multiple
tensorflow/tensorflow/lite/experimental/objc/sources/TFLInterpreter.mm (1 line):
- line 359: // TODO: Set quantization parameters when C API supports it.
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/Platform/Windows-GNU.cmake (1 line):
- line 123: # TODO: check for which gcc versions this is still needed, not needed for gcc >= 4.4.
tensorflow/tensorflow/contrib/mpi_collectives/__init__.py (1 line):
- line 271: # TODO: Move this to library load and eliminate mpi.Session()
tensorflow/tensorflow/core/kernels/mkl_conv_ops.cc (1 line):
- line 674: // TODO 3-D support for Depthwise is not there
tensorflow/tensorflow/core/kernels/depthwise_conv_op_gpu.h (1 line):
- line 1017: // TODO: Add fp32 accumulation to half calls of this function. This addition
tensorflow/tensorflow/core/kernels/substr_op.cc (1 line):
- line 126: // TODO: Use ternary broadcasting for once available in Eigen. Current
tensorflow/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.cc (1 line):
- line 222: // TODO: support other TensorFlow specific types.
tensorflow/tensorflow/core/kernels/cuda_sparse.h (1 line):
- line 38: // TODO: reuse with cuda_solvers
tensorflow/tensorflow/core/kernels/cudnn_rnn_ops.cc (1 line):
- line 1819: // TODO: Current V3 only uses the default standard algorithm to process
tensorflow/tensorflow/core/user_ops/topk_op/util.h (1 line):
- line 36: //TODO: fast-topk algorithm
tensorflow/tensorflow/core/kernels/mkl_pooling_ops_common.cc (1 line):
- line 46: // FIXME: Pooling doesn't expose to get the src_primitive_desc,
tensorflow/tensorflow/core/ops/lookup_ops.cc (1 line):
- line 213: // TODO: Validate keys and values shape.
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/FindDevIL.cmake (1 line):
- line 45: # TODO: Add version support.
tensorflow/tensorflow/python/framework/python_op_gen_internal.cc (1 line):
- line 661: comment = "TODO: add doc.\n";
tensorflow/tensorflow/core/kernels/mkl_requantization_range_per_channel_op.cc (1 line):
- line 66: // TODO: verify performance of not transposing and finding the min max
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/Platform/Haiku.cmake (1 line):
- line 27: # TODO See CMakeDetermineCompilerId.cmake for some more things we may want to do.
tensorflow/tensorflow/core/grappler/clusters/utils.cc (1 line):
- line 127: // ROCM TODO review if numbers here are valid
tensorflow/tensorflow/python/ops/math_grad.py (1 line):
- line 1834: # TODO This fails when x contains 0 and should be fixed
tensorflow/tensorflow/lite/nnapi/NeuralNetworksShim.h (1 line):
- line 42: // TODO: change RTLD_LOCAL? Assumes there can be multiple instances of nn
tensorflow/tensorflow/python/framework/fast_tensor_util.pyx (1 line):
- line 21: # TODO: Use np.float16_t when cython supports it.
tensorflow/tensorflow/python/platform/googletest.py (1 line):
- line 171: and __set__ will be called when stubbing (TODO: A better idea would
serving/tensorflow_serving/util/net_http/server/public/server_request_interface.h (1 line):
- line 49: // TODO: c++14 ::operator delete[](ptr, size_t)
tensorflow/tensorflow/python/ops/parallel_for/pfor.py (1 line):
- line 489: # TODO(agarwal): avoid this stacking. See TODO earlier in
tensorflow/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc (1 line):
- line 145: // FIXME: StringSwitch
tensorflow/tensorflow/contrib/learn/python/learn/datasets/__init__.py (1 line):
- line 107: TODO:
tensorflow/tensorflow/compiler/mlir/xla/ir/xla_ops.cc (1 line):
- line 98: // TODO: support other XLA specific types.
tensorflow/tensorflow/core/platform/cloud/gcs_dns_cache.cc (1 line):
- line 46: // TODO:WSAGetLastError is better than gai_strerror
tensorflow/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc (1 line):
- line 302: // TODO: Remove this when the bug is fixed.
tensorflow/tensorflow/python/eager/memory_tests/memory_test_util.py (1 line):
- line 41: # FIXME: The nature of this test leaves few other options. Maybe there
tensorflow/tensorflow/java/src/main/java/org/tensorflow/EagerOperationBuilder.java (1 line):
- line 154: // TODO (karllessard) could be supported by adding this attribute type in the eager C API
tensorflow/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc (1 line):
- line 3176: // TODO: Allow conversion if kImplicitBatchModeCompatible||kOptimal is used.
tensorflow/tensorflow/core/kernels/scatter_op_gpu.cu.cc (1 line):
- line 47: // TODO: The following fails to compile.
tensorflow/tensorflow/java/build_defs.bzl (1 line):
- line 151: # TODO: stylistic changes in code
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/CPackRPM.cmake (1 line):
- line 1304: # FIXME feature not finished (yet)
tensorflow/tensorflow/lite/kernels/internal/optimized/optimized_ops.h (1 line):
- line 2353: // TODO: BroadcastDiv is intentionally duplicated from reference_ops.h.
tensorflow/tensorflow/python/framework/tensor_util.py (1 line):
- line 54: # TODO: Remove the conversion if cython supports np.float16_t
tensorflow/tensorflow/core/grappler/mutable_graph_view.cc (1 line):
- line 484: // fanins actually exists in the graph, and there is already TODO for that.
blaze-benchmark/cmake/cmake/share/cmake-3.6/editors/emacs/cmake-mode.el (1 line):
- line 309: ; FIXME: Ignore first line if it is "cmake version ..." from CMake < 3.0.
tensorflow/tensorflow/stream_executor/gpu/gpu_diagnostics.h (1 line):
- line 31: // FIXME: These functions are in stream_executor::cuda namespaces for now
NANN_impls/nann/delivery/build_opt_graph.py (1 line):
- line 24: # TODO: move the following into parse_opt function
tensorflow/tensorflow/tools/def_file_filter/def_file_filter.py.tpl (1 line):
- line 25: TODO: this works fine but there is an issue with exporting
tensorflow/tensorflow/lite/experimental/ruy/kernel_arm.h (1 line):
- line 171: // A53 and A55r1. TODO: should this be folded into tuning?
tensorflow/tensorflow/contrib/cmake/tools/create_def_file.py (1 line):
- line 25: TODO: this works fine but there is an issue with exporting
tensorflow/tensorflow/core/ops/math_ops.cc (1 line):
- line 885: // TODO (yongtang): Consolidate 3-ary broadcast instead of
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/Platform/Windows-MSVC.cmake (1 line):
- line 308: set(CMAKE_${lang}_FLAGS_MINSIZEREL_INIT "/MD -DNDEBUG") # TODO: Add '-Os' once VS generator maps it properly for Clang
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/FindLAPACK.cmake (1 line):
- line 74: # TODO: move this stuff to separate module
tensorflow/tensorflow/core/kernels/matmul_op.cc (1 line):
- line 507: // TODO: Avoid extra copy to make bfloat16 matmul efficient on CPU.
tensorflow/tensorflow/workspace.bzl (1 line):
- line 85: # TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
tensorflow/tensorflow/core/kernels/hexagon/hexagon_control_wrapper.cc (1 line):
- line 390: // TODO: Accept all results
tensorflow/tensorflow/core/kernels/mkl_conv_ops.h (1 line):
- line 383: // TODO add support for 3-D Depthwise
tensorflow/tensorflow/core/platform/s3/s3_file_system.cc (1 line):
- line 62: // TODO (yongtang): `S3_REGION` should be deprecated after 2.0.
tensorflow/tensorflow/core/kernels/cwise_op_mul_1.cc (1 line):
- line 51: // ROCM TODO: re-enable complex64 / complex128 after compiler fix
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/BundleUtilities.cmake (1 line):
- line 1011: # TODO: implement this function for real...
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/CPackDeb.cmake (1 line):
- line 752: # TODO: automate 'objdump -p | grep NEEDED'
tensorflow/tensorflow/cc/framework/cc_op_gen.cc (1 line):
- line 567: comment = "TODO: add doc.\n";
tensorflow/tensorflow/core/kernels/segment_reduction_ops.cc (1 line):
- line 208: // TODO: This implementation of SegmentSumGPUOp is sometimes slower than
tensorflow/tensorflow/go/graph.go (1 line):
- line 64: // TODO: extend this structure to support more options from TF_ImportGraphDefOptions
tensorflow/tensorflow/core/util/gpu_kernel_helper.h (1 line):
- line 167: // ROCM TODO re-enable them after adding fp16 support logic
blaze-benchmark/cmake/cmake/share/cmake-3.6/Modules/Compiler/Intel-CXX-FeatureTests.cmake (1 line):
- line 5: # FIXME: Intel C++ feature detection works only when simulating the GNU compiler.
tensorflow/tensorflow/python/grappler/cluster.i (1 line):
- line 254: // TODO: extends this to support outputs as well