Sources/TensorFlow/Operators/Basic.swift (8 lines):
- line 579: // TODO: precondition(axis >= 0 && axis < rank, "'axis' is out of range.")
- line 580: // TODO: precondition(batchDimensionCount <= axis,
- line 911: // TODO: What about precedence? Why is this operator used for broadcasting?
- line 945: // TODO: Simplify this once differentiating control flow is supported.
- line 1096: // TODO: Negative indexing and strides syntax.
- line 1107: // TODO: Precondition `lowerBounds.count == upperBounds.count`,
- line 1109: // TODO: Differentiating control flow is not supported yet, thus the thunks.
- line 1197: // TODO: Cannot extend non-nominal type 'UnboundedRange'.

Sources/TensorFlow/Operators/NN.swift (5 lines):
- line 634: // TODO: Currently this is not higher order differentiable. Redefine in
- line 694: // TODO: Currently this is not higher order differentiable. Redefine in
- line 752: // TODO: Currently this is not higher order differentiable. Redefine in
- line 812: // TODO: Currently this is not higher order differentiable. Redefine in
- line 888: // TODO: Currently this is not higher order differentiable. Redefine in

Sources/TensorFlow/Operators/Math.swift (4 lines):
- line 23: // TODO:
- line 27: // TODO: Remove the following extension once `./` and `./=` are defined for
- line 899: // FIXME: Scoped imports are not yet supported in parseable module interfaces, so
- line 2727: // TODO: Consider making the return type be generic over `FloatingPoint` types

Sources/TensorFlow/Epochs/NonuniformTrainingEpochs.swift (4 lines):
- line 46: // TODO: Figure out how to handle non-threasafe PRNGs with a parallel shuffle
- line 94: // TODO: use a parallel shuffle like mergeshuffle
- line 120: // TODO: fully sorting is overkill; we should use introselect here.
- line 218: // TODO: Test the laziness of the result.

Sources/x10/xla_tensor/cross_replica_reduces.cpp (3 lines):
- line 105: // TODO: We use pseudo-tokens ATM, which are real values. This need to be
- line 147: // TODO: This is missing layout pinning ATM. If XLA scheduling is not exactly
- line 162: // TODO: This is missing layout pinning ATM. If XLA scheduling is not exactly

Sources/x10/xla_tensor/tensor.cpp (2 lines):
- line 483: // LOG(FATAL) << "TODO check device";
- line 943: // TODO: This can be optimized via proper XRT/XLA computation.

Sources/TensorFlow/Core/MixedPrecision.swift (2 lines):
- line 155: // TODO (SR-12968): Mark `tensor` with `@noDerivative` and remove custom vjp below.
- line 165: // TODO (SR-12968): Remove when `tensor` can be marked `@noDerivative` in `init`.

Sources/TensorFlow/Core/Runtime.swift (2 lines):
- line 57: // TODO: change this and subsequent properties from static to thread local.
- line 329: // TODO: Can we interop between modes?

Sources/TensorFlow/Epochs/TrainingEpochs.swift (2 lines):
- line 36: // TODO: Figure out how to handle non-threasafe PRNGs with a parallel shuffle
- line 68: // TODO: use a parallel shuffle like mergeshuffle

Sources/TensorFlow/Core/LazyTensorTrace.swift (1 line):
- line 65: // TODO: We only pick operations on which `lazyOp` depends on. Note that

Sources/TensorFlow/Bindings/generate_wrappers.py (1 line):
- line 745: # FIXME: Re-add `@_frozen` after SR-9739 is resolved.

Sources/TensorFlow/Core/Utilities.swift (1 line):
- line 158: // TODO: Consider revising the call sites where this is necessary to only need UnsafeMutablePointer

Sources/x10/swift_bindings/optimizers/Optimizer.swift (1 line):
- line 102: // TODO: Experiment with efficiently fusing these...
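As a rough illustration of the first two Basic.swift entries above (lines 579-580), the checks those TODO comments spell out could be enabled along the lines of the sketch below. The helper name and the second precondition message are invented for this example only; the second comment is truncated in the source, so its exact wording is an assumption.

```swift
import TensorFlow

// Hypothetical standalone helper (`validateAxis` is not the code at
// Basic.swift:579); it only shows the two precondition checks quoted in the
// TODO comments. The second message is invented, since the original comment
// is truncated.
func validateAxis<Scalar: TensorFlowScalar>(
  _ axis: Int,
  batchDimensionCount: Int,
  of tensor: Tensor<Scalar>
) {
  precondition(axis >= 0 && axis < tensor.rank, "'axis' is out of range.")
  precondition(
    batchDimensionCount <= axis,
    "'batchDimensionCount' must not exceed 'axis'.")
}
```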
Sources/TensorFlow/Operators/Comparison.swift (1 line):
- line 148: // TODO: infix operator ≈: ComparisonPrecedence

Sources/TensorFlow/Operators/Dataset.swift (1 line):
- line 29: // TODO: There's no support for TF's "global seed" yet, so we always use the default graph seed as

Sources/TensorFlow/StdlibExtensions.swift (1 line):
- line 284: // FIXME: `one` should probably be removed from the protocol. `Array` cannot represent `one`.

Sources/TensorFlow/X10/Device.swift (1 line):
- line 149: // TODO: Pull this from withDevice() {} mechanism?

Sources/TensorFlow/Epochs/Algorithms.swift (1 line):
- line 193: // FIXME: this algorithm should be benchmarked on arrays against

Sources/CX10/functional_while.cc (1 line):
- line 49: // TODO: color when building the graph as this can be n^2

Sources/TensorFlow/Epochs/Collatable.swift (1 line):
- line 31: // TODO: derived conformance

Sources/Tensor/Random.swift (1 line):
- line 511: // FIXME: Box-Muller can generate two values for only a little more than the

Sources/x10/swift_bindings/Device.swift (1 line):
- line 149: // TODO: Pull this from withDevice() {} mechanism?

Sources/TensorFlow/Core/LazyTensorTraceCache.swift (1 line):
- line 116: // TODO: we might avoid running the following check based on results of promotableConstant
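The Random.swift FIXME (line 511) refers to the classical Box-Muller transform, which produces two independent standard normal samples from two uniform samples, roughly for the cost of one. A minimal sketch, assuming nothing about the surrounding code in Random.swift (the function name below is invented for illustration):

```swift
import Foundation

// Classical Box-Muller transform: two uniform samples yield two independent
// standard normal samples -- the "two values" the FIXME mentions. `boxMullerPair`
// is a name made up for this sketch and does not appear in Random.swift.
func boxMullerPair<G: RandomNumberGenerator>(using generator: inout G) -> (Double, Double) {
  // Keep u1 strictly positive so that log(u1) is finite.
  let u1 = Double.random(in: Double.leastNonzeroMagnitude...1, using: &generator)
  let u2 = Double.random(in: 0..<1, using: &generator)
  let radius = (-2 * log(u1)).squareRoot()
  let theta = 2 * Double.pi * u2
  return (radius * cos(theta), radius * sin(theta))
}

// Usage example: draw one pair of standard normal samples.
var rng = SystemRandomNumberGenerator()
let (z0, z1) = boxMullerPair(using: &rng)
```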