utils_nlp/models/transformers/question_answering.py (4 lines):
- line 300: # TODO: maybe generalize the following code
- line 1004: # TODO: double check this
- line 1639: # TODO: Should we set p_mask = 1 for cls token?
- line 1667: # TODO: maybe this can be improved to compute

utils_nlp/models/transformers/bertsum/predictor.py (2 lines):
- line 162: # TODO: faster code path for beam_size == 1.
- line 164: # TODO: support these blacklisted features.

utils_nlp/eval/SentEval/senteval/tools/validation.py (2 lines):
- line 216: # TODO: Find a hack for reducing nb epoches in SNLI
- line 237: # TODO: Find a hack for reducing nb epoches in SNLI

utils_nlp/eval/SentEval/senteval/mrpc.py (1 line):
- line 34: # TODO : Should we separate samples in "train, test"?

utils_nlp/models/transformers/common.py (1 line):
- line 186: # TODO: Is this while necessary???

utils_nlp/eval/SentEval/senteval/probing.py (1 line):
- line 137: # TODO: Voice?