crypten/nn/privacy/dp_split.py (5 lines):
- line 199: # TODO: Handle batch dimension here
- line 294: # TODO: Implement DP properly to make correct DP guarantees
- line 295: # TODO: Implement custom DP mechanism (split noise / magnitude)
- line 339: # TODO: Async / parallelize this
- line 448: # TODO: make this optional?

crypten/gradients.py (5 lines):
- line 1485: # TODO: Eliminate dependency on torch internal function by implementing in util
- line 1616: # TODO: Implement conv1d gradient under following condition:
- line 1623: # TODO: Eliminate dependency on torch internal function by implementing in util
- line 1696: # TODO: Implement conv2d gradient under following condition:
- line 1703: # TODO: Eliminate dependency on torch internal function by implementing in util

crypten/mpc/provider/provider.py (3 lines):
- line 61: # TODO: Deal with any overwrite issues
- line 78: # TODO: Deal with any overwrite issues
- line 145: # TODO: parallelize / async this

crypten/__init__.py (3 lines):
- line 326: # TODO: Encrypt modules before returning them
- line 350: # TODO: Add support for loading from correct device (kwarg: map_location=device)
- line 398: # TODO: Add support for saving to correct device (kwarg: map_location=device)

crypten/cryptensor.py (3 lines):
- line 153: # TODO: Automatically register all these functions in CrypTensor?
- line 444: # TODO: Add validation_mode / validate_correctness
- line 581: # TODO: Rename this to __copy__()?

crypten/common/functions/approximations.py (2 lines):
- line 307: # TODO: Set these with configurable parameters
- line 319: # TODO: Support addition with different encoder scales

crypten/mpc/primitives/arithmetic.py (2 lines):
- line 448: # TODO: Add test coverage for this code path (next 4 lines)
- line 558: # TODO: Add check for whether ceil_mode would change size of output and allow ceil_mode when it wouldn't

crypten/mpc/mpc.py (2 lines):
- line 80: # TODO: Rename this to __deepcopy__()?
- line 88: # TODO: Rename this to __copy__()?

crypten/nn/module.py (2 lines):
- line 1234: # TODO: ONNX specification says the permutation should be
- line 1999: # TODO: Eliminate copy-pasta by implementing _Conv parent class

crypten/cuda/cuda_tensor.py (2 lines):
- line 145: # TODO: Rename this to __copy__()?
- line 151: # TODO: Rename this to __deepcopy__()?

crypten/common/util.py (1 line):
- line 57: # FIXME: pytorch currently does not register `torch.cat` and

crypten/mpc/primitives/beaver.py (1 line):
- line 152: # TODO: Incorporate eta_xr

crypten/nn/onnx_converter.py (1 line):
- line 231: return attr.s # TODO: Sanitize string.

crypten/mpc/primitives/binary.py (1 line):
- line 249: # TODO: Remove explicit broadcasts to allow smaller beaver triples

crypten/common/rng.py (1 line):
- line 19: # TODO (brianknott): Check whether this RNG contains the full range we want.

crypten/communicator/communicator.py (1 line):
- line 166: # TODO: Replace this

crypten/common/functions/pooling.py (1 line):
- line 44: # TODO: Find a better solution for padding with max_pooling

crypten/mpc/provider/ttp_provider.py (1 line):
- line 39: # TODO: Compute size without executing computation

crypten/models/__init__.py (1 line):
- line 123: # TODO: fix replacement in global `torch` module - perhaps use __torch_function__

scripts/aws_launcher.py (1 line):
- line 270: # TODO: Although paramiko.SSHClient.exec_command() can accept