Summary: 166 instances, 142 unique

| Text | Count |
| --- | --- |
| // TODO: convert first/last into inequality check (<=, >=), so they can work with boundaries | 1 |
| if (currentFunction.GetCurrentRegion() == nullptr) // TODO: put this check in GetCurrentFunction() | 1 |
| // TODO: for each dimension, loop over minimum of input and output interval. Then we don't have to check if the value is out-of-bounds | 2 |
| // TODO: inputTypes, outputTypes, extraArgs | 2 |
| // TODO: fix TransformInputBlock to take ranges instead of tileSize/filterSize/blockSize | 1 |
| // TODO: if we're not separable, we want to reduce first and accumulate a single output channel (== filter index) | 1 |
| // TODO: reset output layout (incl. transpose info) | 1 |
| Sparsity GetWeightsSparsity(const WeightsType& weights) // TODO: add layout | 1 |
| // TODO: in the "trainFiltersIndependently" case, we should | 1 |
| // TODO: log error | 2 |
| // TODO: fix this so that GetNonPointerType call isn't needed | 1 |
| // TODO: remove this eventually | 1 |
| // TODO: add implicit padding | 1 |
| // TODO: deal with merge points -- need to skip parallel area and return something before path split. | 1 |
| // TODO: use the entries of dataOrder to compute the indices | 1 |
| auto twiddleFactorsUnwrappedVar = module.ConstantArray(std::string("twiddles_") + std::to_string(halfN), twiddleFactorsUnwrapped); // TODO: encode type name in variable name | 1 |
| // TODO: if FunctionType was a function that took a vector of inputs, then we could dispense with this `if constexpr` block | 1 |
| // TODO: rename this function to something without "train" or "predictor" in its name | 1 |
| // TODO: combine this and IsCompleteAncestor so it's not O(N^2) (via set-intersect?) | 1 |
| // TODO: assert (channelStart + filterRange.size * numFilterChannels) < numChannels) --- make sure it doesn't wrap around while processing a block | 1 |
| // TODO: reverse order of args? | 1 |
| // TODO : make this a template specialization of Define(), currently lambdas and std::functions aren't | 1 |
| // TODO: need to know if we're going to invoke any kernels after the inner loops, and remove them from the valid kernel groups | 1 |
| std::vector categoryNames; // TODO: read this in from a file | 1 |
| // TODO : Generalize to machine characteristics and move out of CachingStrategies | 1 |
| with DriveTest(model=model, # TODO - no labels, no expected | 1 |
| // TODO: block-vectorize this: | 1 |
| # TODO - only chose the convolution method based on perf at this point. Need other strategy. | 1 |
| // TODO: this might not be needed in the high level api | 1 |
| // TODO: in both the "independent channel" and "spatial filter" cases, we really are optimizing to find a scalar result. | 1 |
| // TODO: add a comment describing the logic behind setting channelIndex (it allows us to unify depthwise-separabale and non-separable logic) | 1 |
| // TODO: assert we're idle (until we can handle multiple task arrays to be active) | 1 |
| // TODO: check version number and read this format if in back-compat mode | 1 |
| // TODO: | 1 |
| // TODO : determine if a vectorized approach is worthwhile here | 1 |
| // TODO: move `GetLoopRage` somewhere else | 1 |
| auto channelIndex = inputRange.channels.begin; // #### TODO: verify we don't really want inputRange.channels.index here | 1 |
| // TODO: rename this to avoid clashes with other PointerOffset() | 1 |
| // TODO: throw if we read bad values or hit EOF too soon | 1 |
| // TODO: record info about both phases (sparsify and reoptimize) | 1 |
| // TODO: assert back of _objectInfo == objInfo | 1 |
| // TODO: We want to only fire on a loop involving a leaf child of the index | 1 |
| # TODO - drivetest.py does not support "orangepi0" | 1 |
| // TODO: add a reorder node here that adds padding to the output, if necessary | 1 |
| // TODO: rename 'output' param here --- it's the input to the convolutional layer | 1 |
| # TODO - change this order requirement to make it more flexible | 1 |
| // TODO: deal with padding | 1 |
| # TODO: This logic is very fragile, we may want to have a model | 1 |
| # TODO: get rid of this, but keep the filter-transform part | 1 |
| archiver["filterSize"] << _filterSize; // TODO: get this from weights layout | 1 |
| ## TODO: | 1 |
| // TODO: re-enable this branch when scalar port bug is fixed | 1 |
| // TODO: each boundary index needs its own "placement" value (e.g., you could have a kernel that runs when j==0 and k==N-1) | 1 |
| // TODO: Put this back once we're using the transposed output layout | 2 |
| // TODO: What does "runtime" variable mean? Stack/heap? This is only called in 1 place, from MapCompiler::AllocatePortFunctionArgument | 1 |
| // TODO: rename this function to imply we're running an optimization | 1 |
| // TODO: deal with eventually not having an emit-time-constant range here | 1 |
| // TODO: Fix this to deal with convParams.stride != 1 | 1 |
|  | 1 |
| // TODO: add index, testVal to AND list, later return a conjunction of equality predicates | 1 |
| // TODO: restore state of variables | 2 |
| // TODO: put the above IREmitter call in the IRFunctionEmitter constructor | 3 |
| raise RuntimeError("FIXME: Traced RNNs don't support backward") | 1 |
| // TODO: this is really paddedHeight * filterSize * batchSize * stackSize - inputPadding * filterSize * batchSize | 1 |
| // TODO: check this carefully to make sure it's valid for stackSize != all and stackSize != 1 | 1 |
| // TODO: should this be 64 bits for 64-bit systems? | 1 |
| // TODO: also add std::array of archivable variant types | 1 |
| // TODO: something about regions | 1 |
| // TODO: make these be TransformDataWithSubmodel | 1 |
| bit_map = 1 # TODO: need a way to get this from the model | 2 |
| // TODO: write out as a table? | 1 |
| llvm::StructType* IRThreadPoolTaskQueue::GetTaskQueueDataType(IRModuleEmitter& module) const // TODO: come up with a naming convention for "class" structs like this | 1 |
| bool needsDereference = valType->isPointerTy(); // TODO: Maybe this should be `isPtrOrPtrVectorTy()` or even `isPtrOrPtrVectorTy() \|\| isArrayTy()` | 1 |
| // TODO: fix MatrixVectorMultiplyNode so it exposes the transpose options supported by BLAS GEMV functions. | 1 |
| // TODO: interleave load/compress more tightly to eliminate need for a scratch variable to hold a whole row | 1 |
| // TODO: allocate a set of temporaries per thread and parallelize the big outer loop | 1 |
| // TODO: Move this out to the API surface | 2 |
| // TODO: combine precompiled-IR case with use-own-function case | 1 |
| // TODO: rename 'output' parameter -- it's where we graft the new node | 2 |
| // TODO: fix up logic for deciding how many tasks to use. | 1 |
| math::MultiplyScaleAddUpdate(1.0, GetLabelEmbeddings(), similarityToPrototypes, 0.0, labels); // TODO due to the zero, there is a more appropriate operation | 1 |
| // TODO: Change this so that IDs are the responsibility of the EmitterContext | 1 |
| // TODO: Make this the basis of an iterator for MemoryLayout | 2 |
| // TODO: interleave load/compress more tightly to eliminate need for a scratch variable to hold the whole row | 1 |
| // TODO: turn this into a real For() loop | 1 |
| # TODO - here we ignore the global compiler options. | 1 |
| // TODO: add order (row-maj / channel-maj) parameter | 1 |
| // TODO: just copy the node and modify its layer | 1 |
| if (IsFullyConnectedLayerNode(&node)) // TODO: replace with submodel-matcher | 1 |
| // TODO: fix this to work with depthwise-separable convolutions | 1 |
| // TODO: add to _functions list | 1 |
| // TODO : Support buffer alignment in CppEmitterContext | 1 |
| throw 0; // TODO: throw a real exception (of type value::Exception::DebugTrapException, perhaps) | 1 |
| // TODO: need to allow using non-"dimension" indices as well (for non-innermost kernels) | 1 |
| auto fn = reinterpret_cast(jitter.GetFunctionAddress(_moduleName + "_PrintNodeProfilingInfo")); // TODO: hide this reinterpret_cast in a templated method of IRExecutionEngine | 1 |
| // TODO: can we use an array type here? | 1 |
| // TODO (kerha): _computeContext isn't copied right now. Not sure if it should be. [2019-08-23] | 1 |
| // TODO: set initial value of index variable (at least in loop-nest-printing case) | 1 |
| // TODO: pass in original layer's output shape, for verification? (or, compare output of this function with original layer output shape) | 1 |
| // TODO: assert index1 and index2 are both in this dimension | 1 |
| # TODO: check package versions (currently we just check names) | 1 |
| llvm::StructType* IRThreadPoolTaskArray::GetTaskArrayDataType(IRModuleEmitter& module) // TODO: come up with a naming convention for "class" structs like this | 1 |
| // TODO: find a way to tell the differerence between a port that doesn't have a mapping because | 1 |
| // TODO: set a flag indicating that this function is done | 1 |
| // TODO: add this function to the _functions list?? | 6 |
| // TODO: find a better (more general) way to indicate what the solution is, rather than with a "isSpatialConvolution" flag | 1 |
| // TODO: add options for normalizing and/or mean-subtracting | 1 |
| // TODO: get input of submodel to retrain | 1 |
| # TODO - only optimize based on convolution method at this point | 1 |
| // TODO: figure out what to do with the "where" parameter | 2 |
| ## TODO: emit just a single loop for 1D convolutions | 1 |
| // TODO: add alignment directive | 1 |
| // TODO: have a more specific check to see if the variable is mapped to a port, rather than if it's a function input/output | 1 |
| # TODO: make this recursive to deal with multi-level directories | 1 |
| // TODO: make new function that emits the contents of the task function (which is also the stuff emitted in the serial case), and call this from | 1 |
| config[option[i][2:]] = True # TODO - assuming flag without argument sets to True | 1 |
| archiver["inputLayout"] << _inputMemoryLayout; // TODO: get rid of this | 1 |
| // TODO: get types in a way that doesn't require emitting these variables | 1 |
| // TODO: fix | 1 |
| // TODO: assert(bitcount(length) == 1) (i.e., length is a power of 2) | 3 |
| // TODO : make this a template specialization of Define(), currently lambdas and std::functions aren't | 1 |
| // TODO: I think now the outputTile is always contiguous, so we can just iterate over it continuously | 1 |
| // TODO: put this in a function that preprocesses the kernel predicates when adding the kernels to the schedule | 1 |
| // TODO: return nullptr if out of bounds (this is device-side code, and we may not be able to throw exceptions) | 4 |
| // TODO: add a reorder node here that makes the input be a contiguous vector, if necessary | 1 |
| // TODO: eventually, pass this in to ComputeTransformedOutput, instead of just assuming a layout there | 1 |
| // TODO move to Array slice code and generalize | 1 |
| // TODO: explicitly check for empty loop? | 1 |
| // TODO: later we may normalize the loops, in which case indexScale here will be the loop increment | 1 |
| model::PortMemoryLayout _inputMemoryLayout; // TODO: get rid of this by using a ReinterpretLayoutNode if necessary | 1 |
| else if (IsConvolutionalLayerNode(&node)) // TODO: replace with submodel-matcher | 1 |
| // TODO: get rid of const_cast | 2 |
| # TODO: combine MatrixLiteral with MatrixExpr | 1 |
| // TODO: Rename this function to make it clear we're adding nodes to the model (if normalizeInputs is true) | 1 |
| # TODO: only set this property (and omit ${LANGUAGE_LIBRARIES} from the swig_link_libraries call) if we're | 1 |
| // TODO: Interpolate if there is a sample, and currentTime > sampleTime | 1 |
| // TODO: let's make a popcount function that does the right thing | 1 |
| // TODO: See about removing the "% numChannels" if we can know that it's unnecessary at compile-time (e.g., if numFilterChannels == N*numChannels) for N > 0 | 1 |
| // TODO : replace memory offsets with absolute offset support | 1 |
| // TODO: bounds-checking | 1 |
| // TODO: rename these to something without "train" and "predictor" in the name | 1 |
| // const auto& outputLayout = this->GetOutputMemoryLayout().ReorderedCopy({2,0,1}); // TODO: reorder from r,c,d -> d,r,c | 2 |
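
Two of the entries above describe small, self-contained utilities rather than structural refactorings: the portable popcount ("let's make a popcount function that does the right thing") and the power-of-two assertion (`assert(bitcount(length) == 1)`, 3 occurrences). The sketch below is purely illustrative and not taken from the codebase; the names `PopCount`, `IsPowerOfTwo`, and `CheckLength` are placeholders.

```cpp
#include <cassert>
#include <cstdint>

// Illustrative popcount: clears the lowest set bit each iteration (Kernighan's
// method) and counts how many iterations that takes. C++20 callers could use
// std::popcount from <bit> instead.
inline int PopCount(uint64_t x)
{
    int count = 0;
    for (; x != 0; x &= x - 1)
    {
        ++count;
    }
    return count;
}

// "length is a power of 2" via the usual bit trick, no popcount needed.
inline bool IsPowerOfTwo(uint64_t length)
{
    return length != 0 && (length & (length - 1)) == 0;
}

inline void CheckLength(uint64_t length)
{
    assert(PopCount(length) == 1); // the check suggested by the TODO
    assert(IsPowerOfTwo(length));  // equivalent formulation
}
```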