evaluation/tiny_benchmark/maskrcnn_benchmark/data/transforms/transforms.py (5 lines): - line 398: # TODO: should be replace with other policy to remove most ignore. - line 603: # TODO: should be replace with other policy to remove most ignore. - line 761: # TODO: must change to insert of image(not union) as crop area ratio - line 882: # TODO: should be replace with other policy to remove most ignore. - line 985: # TODO: must change to insert of image(not union) as crop area ratio evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/retinanet/inference.py (4 lines): - line 96: # TODO most of this can be made out of the loop for - line 98: # TODO:Yang: Not easy to do. Because the numbers of detections are - line 127: # TODO very similar to filter_results from PostProcessor - line 129: # TODO Yang: solve this issue in the future. No good solution evaluation/tiny_benchmark/maskrcnn_benchmark/structures/keypoint.py (4 lines): - line 10: # FIXME remove check once we have better integration with device - line 18: # TODO should I split them? - line 129: # TODO this doesn't look great - line 153: # TODO make this nicer, this is a direct translation from C2 (but removing the inner loop) evaluation/tiny_benchmark/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu (3 lines): - line 10: // TODO make it in a common file - line 155: // TODO remove the dependency on input and use instead its sizes -> save memory - line 169: // TODO add more checks evaluation/tiny_benchmark/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu (2 lines): - line 9: // TODO make it in a common file - line 301: // TODO remove the dependency on input and use instead its sizes -> save memory evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/locnet/inference.py (2 lines): - line 160: # TODO very similar to filter_results from PostProcessor - line 162: # TODO Yang: solve this issue in the future. 
No good solution evaluation/tiny_benchmark/maskrcnn_benchmark/structures/segmentation_mask.py (2 lines): - line 95: # TODO check if necessary - line 134: # TODO add squeeze? evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/gaussian_net/inference.py (2 lines): - line 207: # TODO very similar to filter_results from PostProcessor - line 209: # TODO Yang: solve this issue in the future. No good solution evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/box_coder.py (2 lines): - line 32: TO_REMOVE = 1 # TODO remove - line 64: TO_REMOVE = 1 # TODO remove evaluation/tiny_benchmark/maskrcnn_benchmark/structures/boxlist_ops.py (2 lines): - line 42: # TODO maybe add an API for querying the ws / hs - line 91: # TODO redundant, remove models/yolo.py (2 lines): - line 108: # # TODO: size-adaptive? - line 156: patch_off = torch.cat((offsets[:, :1], offsets[:, 1:] / r), dim=1) # TODO: from 4 to 32 evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/fcos/inference.py (2 lines): - line 138: # TODO very similar to filter_results from PostProcessor - line 140: # TODO Yang: solve this issue in the future. No good solution train.py (2 lines): - line 165: if pretrained: # TODO: freeze - line 563: opt.epochs, opt.hyp = opt.epochs * 0 + epochs, hyp # TODO: epochs evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/cascade_fcos/inference.py (2 lines): - line 170: # TODO very similar to filter_results from PostProcessor - line 172: # TODO Yang: solve this issue in the future. No good solution evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py (2 lines): - line 10: # TODO check if want to return a single BoxList or a composite - line 187: # TODO: Is this JIT compatible? 
evaluation/tiny_benchmark/maskrcnn_benchmark/csrc/cuda/nms.cu (2 lines): - line 83: THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState - line 126: // TODO improve this part evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/inference.py (2 lines): - line 35: # TODO remove and use only the Keypointer - line 111: # TODO do this properly evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/retinanet_fa/box_coder.py (2 lines): - line 32: TO_REMOVE = 1 # TODO remove - line 64: TO_REMOVE = 1 # TODO remove evaluation/tiny_benchmark/maskrcnn_benchmark/layers/smooth_l1_loss.py (1 line): - line 5: # TODO maybe push this to nn? evaluation/tiny_benchmark/maskrcnn_benchmark/structures/bounding_box.py (1 line): - line 181: # TODO should I filter empty boxes here? utils/loss.py (1 line): - line 270: gain[2:6] = torch.tensor([width, height, width, height], dtype=dtype) / (8 * r) # TODO: from 4 to 32 evaluation/tiny_benchmark/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu (1 line): - line 14: // TODO make it in a common file evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/cascade_fcos/cascade_fcos.py (1 line): - line 19: # TODO: Implement the sigmoid version first. evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/roi_heads/roi_heads.py (1 line): - line 25: # TODO rename x to roi_box_features, if it doesn't increase memory consumption utils/wandb_logging/wandb_utils.py (1 line): - line 245: # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/fcos/fcos.py (1 line): - line 19: # TODO: Implement the sigmoid version first. 
evaluation/tiny_benchmark/maskrcnn_benchmark/structures/image_list.py (1 line): - line 50: # TODO Ideally, just remove this and let me model handle arbitrary models/replknet.py (1 line): - line 20: # TODO more efficient PyTorch implementations of large-kernel convolutions. Pull requests are welcomed. evaluation/tiny_benchmark/maskrcnn_benchmark/csrc/nms.h (1 line): - line 16: // TODO raise error if not compiled with CUDA evaluation/tiny_benchmark/maskrcnn_benchmark/solver/lr_scheduler.py (1 line): - line 7: # FIXME ideally this would be achieved with a CombinedLRScheduler, evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/backbone/fpn.py (1 line): - line 69: # TODO use size instead of scale to make it robust to different sizes evaluation/tiny_benchmark/tools/train_net.py (1 line): - line 82: torch.cuda.empty_cache() # TODO check if it helps evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py (1 line): - line 58: # TODO think about a representation of batch of boxes evaluation/tiny_benchmark/maskrcnn_benchmark/layers/smooth_l1_loss2.py (1 line): - line 16: # TODO maybe push this to nn? evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/backbone/resnet.py (1 line): - line 305: # TODO: specify init for the above evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/inference.py (1 line): - line 161: # TODO resolve this difference and make it consistent. It should be per image, evaluation/tiny_benchmark/tools/train_test_net.py (1 line): - line 115: torch.cuda.empty_cache() # TODO check if it helps evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py (1 line): - line 30: # TODO put the proposals on the CPU, as the representation for the evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/retinanet_fa/retinanet.py (1 line): - line 27: # TODO: Implement the sigmoid version first. 
evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/loss.py (1 line): - line 93: # TODO check if this is the right one, as BELOW_THRESHOLD evaluation/tiny_benchmark/maskrcnn_benchmark/layers/sigmoid_focal_loss.py (1 line): - line 8: # TODO: Use JIT to replace CUDA implementation in the future. evaluation/tiny_benchmark/maskrcnn_benchmark/data/samplers/distributed.py (1 line): - line 3: # FIXME remove this once c10d fixes the bug it has evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/retinanet/retinanet.py (1 line): - line 25: # TODO: Implement the sigmoid version first. evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/locnet/head.py (1 line): - line 14: # TODO: Implement the sigmoid version first.