MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py (5 lines):
- line 79: # TODO replace with get_img_info?
- line 117: # TODO replace with get_img_info?
- line 197: # TODO replace with get_img_info?
- line 203: # TODO maybe remove this and make it explicit in the documentation
- line 341: # TODO make it pretty

MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu (3 lines):
- line 10: // TODO make it in a common file
- line 155: // TODO remove the dependency on input and use instead its sizes -> save memory
- line 169: // TODO add more checks

MaskRCNN/pytorch/maskrcnn_benchmark/modeling/backbone/resnet.py (2 lines):
- line 317: # TODO: specify init for the above
- line 413: # TODO: specify init for the above

MaskRCNN/pytorch/maskrcnn_benchmark/structures/segmentation_mask.py (2 lines):
- line 95: # TODO chck if necessary
- line 134: # TODO add squeeze?

MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda/nms.cu (2 lines):
- line 83: THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
- line 126: // TODO improve this part

MaskRCNN/pytorch/maskrcnn_benchmark/modeling/box_coder.py (2 lines):
- line 37: TO_REMOVE = 1 # TODO remove
- line 69: TO_REMOVE = 1 # TODO remove

MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py (2 lines):
- line 10: # TODO check if want to return a single BoxList or a composite
- line 192: # TODO: Is this JIT compatible?

MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu (2 lines):
- line 9: // TODO make it in a common file
- line 301: // TODO remove the dependency on input and use instead its sizes -> save memory

MaskRCNN/pytorch/maskrcnn_benchmark/structures/boxlist_ops.py (2 lines):
- line 43: # TODO maybe add an API for querying the ws / hs
- line 93: # TODO redundant, remove

MaskRCNN/pytorch/maskrcnn_benchmark/layers/smooth_l1_loss.py (1 line):
- line 5: # TODO maybe push this to nn?

MaskRCNN/pytorch/maskrcnn_benchmark/modeling/rpn/inference.py (1 line):
- line 223: # TODO resolve this difference and make it consistent. It should be per image,

MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda/generate_mask_targets.cu (1 line):
- line 440: //TODO: larger threads-per-block might be better here, because each CTA uses 32 KB of shmem,

MaskRCNN/pytorch/maskrcnn_benchmark/structures/bounding_box.py (1 line):
- line 181: # TODO should I filter empty boxes here?
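The two `TO_REMOVE = 1 # TODO remove` entries in modeling/box_coder.py refer to the legacy Detectron-style "+ 1" offset applied when deriving box widths and heights from corner coordinates. The snippet below is a minimal, hypothetical sketch (not the repository's code; the helper name `box_widths_heights` is invented) contrasting the legacy convention with the plain one the TODO proposes:

```python
import torch

# Hypothetical helper illustrating the "+ 1" box-size convention flagged by
# the TO_REMOVE = 1 TODOs in box_coder.py.
def box_widths_heights(boxes: torch.Tensor, legacy_plus_one: bool = True):
    # boxes: (N, 4) tensor of (x1, y1, x2, y2) corner coordinates
    to_remove = 1 if legacy_plus_one else 0
    widths = boxes[:, 2] - boxes[:, 0] + to_remove
    heights = boxes[:, 3] - boxes[:, 1] + to_remove
    return widths, heights

boxes = torch.tensor([[0.0, 0.0, 10.0, 20.0]])
print(box_widths_heights(boxes, legacy_plus_one=True))   # (tensor([11.]), tensor([21.]))
print(box_widths_heights(boxes, legacy_plus_one=False))  # (tensor([10.]), tensor([20.]))
```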
MaskRCNN/pytorch/maskrcnn_benchmark/engine/tester.py (1 line):
- line 15: torch.cuda.empty_cache() # TODO check if it helps

MaskRCNN/pytorch/maskrcnn_benchmark/modeling/backbone/fpn.py (1 line):
- line 56: # TODO use size instead of scale to make it robust to different sizes

MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py (1 line):
- line 13: # TODO need to make the use_07_metric format available

MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/coco.py (1 line):
- line 38: # TODO might be better to add an extra field

MaskRCNN/pytorch/tools/train_net.py (1 line):
- line 188: torch.cuda.empty_cache() # TODO check if it helps

MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads/roi_heads.py (1 line):
- line 22: # TODO rename x to roi_box_features, if it doesn't increase memory consumption

MaskRCNN/pytorch/maskrcnn_benchmark/solver/lr_scheduler.py (1 line):
- line 10: # FIXME ideally this would be achieved with a CombinedLRScheduler,

MaskRCNN/pytorch/maskrcnn_benchmark/data/samplers/distributed.py (1 line):
- line 3: # FIXME remove this once c10d fixes the bug it has

MaskRCNN/pytorch/maskrcnn_benchmark/structures/image_list.py (1 line):
- line 50: # TODO Ideally, just remove this and let me model handle arbitrary

MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py (1 line):
- line 52: # TODO think about a representation of batch of boxes

MaskRCNN/pytorch/maskrcnn_benchmark/csrc/nms.h (1 line):
- line 16: // TODO raise error if not compiled with CUDA
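The identical `torch.cuda.empty_cache() # TODO check if it helps` notes in engine/tester.py and tools/train_net.py can be answered empirically by comparing the CUDA caching allocator's reserved memory before and after the call. The sketch below is a hedged illustration, not repository code: it assumes a CUDA-enabled PyTorch build that provides `torch.cuda.memory_reserved()` (PyTorch >= 1.4), and the helper name `report_empty_cache_effect` is invented.

```python
import torch

# Hypothetical helper: measure how much reserved-but-unused GPU memory
# torch.cuda.empty_cache() actually returns to the driver.
def report_empty_cache_effect(device: int = 0) -> None:
    if not torch.cuda.is_available():
        print("CUDA not available; nothing to measure.")
        return
    before = torch.cuda.memory_reserved(device)
    torch.cuda.empty_cache()
    after = torch.cuda.memory_reserved(device)
    print(f"reserved before: {before / 2**20:.1f} MiB, "
          f"after: {after / 2**20:.1f} MiB, "
          f"released: {(before - after) / 2**20:.1f} MiB")

if __name__ == "__main__":
    report_empty_cache_effect()
```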