torchbenchmark/score/generate_score_config.py (10 lines):
- line 66: assert 'weight' not in category_spec, "TODO implement manual category weights"
- line 73: assert 'weight' not in tasks, "TODO implement manual domain weights"
- line 80: assert 'weight' not in benchmarks, "TODO implement manual task weights"
- line 85: assert benchmarks[benchmark] is None, "TODO handle benchmark as dict of config specs"
- line 86: # assert 'weight' not in benchmarks[benchmark], "TODO implement manual benchmark weights"
- line 128: assert 'weight' not in category_spec, "TODO implement manual category weights"
- line 133: assert 'weight' not in tasks, "TODO implement manual domain weights"
- line 138: assert 'weight' not in benchmarks, "TODO implement manual task weights"
- line 141: assert benchmarks[benchmark] is None, "TODO handle benchmark as dict of config specs"
- line 142: # assert 'weight' not in benchmarks[benchmark], "TODO implement manual benchmark weights"

torchbenchmark/__init__.py (3 lines):
- line 340: # FIXME: Models will use context "with torch.no_grad():", so the lifetime of no_grad will end after the eval().
- line 341: # FIXME: Must incorporate this "torch.is_grad_enabled()" inside of actual eval() func.
- line 449: # TODO: deduplicate with `torchbenchmark.util.model.no_grad`

torchbenchmark/models/demucs/demucs/tasnet.py (3 lines):
- line 281: # TODO: when P = 3 here works fine, but when P = 2 maybe need to pad?
- line 362: # TODO: Use nn.LayerNorm to impl cLN to speed up
- line 407: # TODO: in torch 1.0, torch.mean() support dim list

torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Translator.py (2 lines):
- line 88: # TODO: expand to batch operation.
- line 110: # TODO: Try different terminate conditions.

torchbenchmark/util/torchtext_legacy/translation.py (2 lines):
- line 106: # TODO: This is a _HORRIBLE_ patch related to #208
- line 223: # TODO: This is a _HORRIBLE_ patch related to #208

torchbenchmark/models/speech_transformer/speech_transformer/data/data.py (2 lines):
- line 24: TODO: this is a little HACK now, put batch_size here now.
- line 128: # TODO: perform subsamping

torchbenchmark/models/attention_is_all_you_need_pytorch/translate.py (2 lines):
- line 58: # TODO: Translate bpe encoded files
- line 63: # TODO: Batch translation

torchbenchmark/models/attention_is_all_you_need_pytorch/preprocess.py (1 line):
- line 332: # TODO: Also update the `freq`, although it is not likely to be used.

torchbenchmark/util/torchtext_legacy/vocab.py (1 line):
- line 14: # TODO (@mttk): Populate classs with default values of special symbols

torchbenchmark/models/moco/main_lincls.py (1 line):
- line 397: # TODO: this should also be done with the ProgressMeter

torchbenchmark/microbenchmarks/nvfuser/__init__.py (1 line):
- line 10: # TODO - a lot of this was copied from pytorch/jit/scripts/log_extract.py,

gen_summary_metadata.py (1 line):
- line 182: # TODO: Modify and update the model to apply metadata changes by the user.

torchbenchmark/models/attention_is_all_you_need_pytorch/transformer/Models.py (1 line):
- line 33: # TODO: make it with torch instead of numpy

torchbenchmark/e2e_models/hf_bert/__init__.py (1 line):
- line 33: # TODO: currently only support 1 GPU device

torchbenchmark/models/tts_angular/model.py (1 line):
- line 58: # TODO: implement state passing for lstms

torchbenchmark/models/tacotron2/__init__.py (1 line):
- line 24: # TODO - currently load_model assumes cuda