archived/text-to-image-fine-tuning/config/kohya-sdxl-config.toml (6 lines):
- line 54: #reg_data_dir = #TODO: add support for regularization. "" #directory for regularization images.
- line 55: #sample_every_n_epochs = #TODO: add support for sampling. 0 #generate sample images every N epochs (overwrites n_steps)
- line 56: #sample_every_n_steps = #TODO: add support for sampling. 100 #generate sample images every N steps
- line 57: #sample_prompts= #TODO: add support for sampling. #"/sample/prompt.txt"
- line 58: #sample_prompts = #TODO: add support for sampling. " man wearing sunglasses and a leather jacket, natural lighting, highly detailed, cinematic" #file for prompts to generate sample images
- line 59: #sample_sampler = #TODO: add support for sampling. "dpm_2" #sampler (scheduler) type for sample images. Choices: "ddim", "pndm", "lms", "euler", "euler_a", "heun", "dpm_2", "dpm_2_a", "dpmsolver", "dpmsolver++", "dpmsingle", "k_lms", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a"
archived/rl_gamerserver_ray/common/sagemaker_rl/orchestrator/clients/ddb/model_db_client.py (5 lines):
- line 12: TODO: Deprecate and embed this class in ModelRecord.
- line 56: # TODO: a model can only be put to pending, from pending state.
- line 63: # TODO: conditional check to verify model is in *ing state while updating...
- line 68: # TODO: a model eval_state can only be put to pending, from pending state
- line 73: # TODO: conditional check to verify model is in *ing state while updating...
archived/bandits_recsys_movielens_testbed/src/vw_agent.py (4 lines):
- line 115: # TODO: Check for errors in CLI args by polling the process
- line 161: # TODO: Error handling in parsing the given example
- line 177: # TODO: Error handling in parsing the given example
- line 184: # TODO: Write to stdin in chunks so that PIPE buffer never overflows
archived/rl_gamerserver_ray/common/sagemaker_rl/ray_launcher.py (3 lines):
- line 101: # TODO: use ConfigList from Coach launcher, and share customization code.
- line 105: # TODO: move this to before customer-specified so they can override
- line 111: self.hyperparameters = ConfigurationList() # TODO: move to shared
build_and_train_models/sm-introduction_to_object2vec_sentence_similarity/sm-introduction_to_object2vec_sentence_similarity.ipynb (2 lines):
- line 222: " TODO: Better handling of the case\n",
- line 1076: " sys_scores.append(get_cosine_similarity(emb1, emb2)) # TODO: implement this\n",
archived/inference_pipeline_custom_containers/containers/postprocessor/docker/code/predictor.py (2 lines):
- line 146: # TODO: use custom flag to indicate that this is in a pipeline rather than relying on the '*/*'
- line 148: # TODO: this is wrong. fix it
archived/bandits_recsys_movielens_testbed/src/env.py (2 lines):
- line 85: # TODO: Randomize user selection
- line 121: # TODO: Implement PBM: Position based model
archived/rl_gamerserver_ray/common/sagemaker_rl/coach_launcher.py (2 lines):
- line 122: # TODO: remove this after converting all samples.
- line 289: # TODO: Remove this whole class when nobody's using it any more.
archived/language-modeling/scripts/run_mlm.py (2 lines):
- line 26: # TODO Do multi-GPU and TPU tests and make sure the dataset length works as expected
- line 27: # TODO Duplicate all changes over to the CLM script
archived/multi_modal_parallel_sagemaker_labeling_workflows_with_step_functions/src/lambda_src/api_workforce_show/main.py (1 line):
- line 56: # TODO: Can add additional user attributes here.
archived/smp-train-t5-sharded-data-parallel/train.py (1 line):
- line 493: # TODO if this happens, we should try num_workers>1 in dataloader # pylint: disable=fixme
archived/smp-train-t5-sharded-data-parallel/data_pipeline.py (1 line):
- line 169: # TODO: set sampler.epoch to correctly shuffle across epochs, else same order will be used for all epochs
archived/tensorflow2-california-housing-sagemaker-pipelines-deploy-endpoint/tensorflow2-california-housing-sagemaker-pipelines-deploy-endpoint.ipynb (1 line):
- line 758: " # TODO - ADD YOUR CODE TO SEND EMAIL...\n",
archived/Text_Classification_BERT/scripts/train_bert.py (1 line):
- line 64: # TODO: ensure batch and model are on the same device in SiftDataloader so that the customer
archived/smp-gpt-sharded-data-parallel/train.py (1 line):
- line 488: # TODO if this happens, we should try num_workers>1 in dataloader # pylint: disable=fixme
archived/rl_gamerserver_ray/common/sagemaker_rl/orchestrator/workflow/manager/experiment_manager.py (1 line):
- line 1185: # TODO: add validation/instructions if multiple deployment
archived/ap-batch-transform.ipynb (1 line):
- line 160: "# TODO: stratified sampling\n",
archived/smp-train-gptj-sharded-data-parallel-tp/data_pipeline.py (1 line):
- line 169: # TODO: set sampler.epoch to correctly shuffle across epochs, else same order will be used for all epochs
archived/multi_modal_parallel_sagemaker_labeling_workflows_with_step_functions/src/lambda_src/shared/db.py (1 line):
- line 563: # TODO: Replace with get_item
archived/falcon/train.py (1 line):
- line 493: # TODO if this happens, we should try num_workers>1 in dataloader # pylint: disable=fixme
archived/end_to_end_music_recommendation/code/demo_helpers.py (1 line):
- line 251: # TODO: flesh out docstrings
archived/smp-gpt-sharded-data-parallel/data_pipeline.py (1 line):
- line 169: # TODO: set sampler.epoch to correctly shuffle across epochs, else same order will be used for all epochs
archived/bandits_statlog_vw_customEnv/src/vw_model.py (1 line):
- line 156: # TODO: Error handling in parsing the given example
archived/sagemaker-autopilot-pipelines/autopilot_pipelines_demo_notebook.ipynb (1 line):
- line 155: "# TODO: need to replace the lambda execution role name by its actual value\n",
archived/multi_modal_parallel_sagemaker_labeling_workflows_with_step_functions/src/lambda_src/api_batch_create/main.py (1 line):
- line 98: # TODO: find more specific exception for resource not found
archived/smp-train-gpt-neox-sharded-data-parallel/data_pipeline.py (1 line):
- line 169: # TODO: set sampler.epoch to correctly shuffle across epochs, else same order will be used for all epochs
build_and_train_models/sm-distributed_model_parallel_v2/shared-scripts/fsdp_utils.py (1 line):
- line 63: # TODO: Add support for Block
archived/deep_demand_forecasting/lstnet/inference.py (1 line):
- line 30: # TODO: customize serde
archived/multi_modal_parallel_sagemaker_labeling_workflows_with_step_functions/src/lambda_src/step_functions_send_second_level_sns_and_check_response/main.py (1 line):
- line 16: # TODO: Selection should be based on frames in DDB table
build_and_train_models/sm-distributed_model_parallel_v2/shared-scripts/data/prep/_prepare_nemo_megatron_dataset.py (1 line):
- line 301: # TODO: There are dependencies b/w libraries and model files / tokenizer type strings to check.
build_and_train_models/sm-object_detection_birds/sm-object_detection_birds.ipynb (1 line):
- line 461: " # TODO: add progress bar\n",
archived/fraud_detection/demo_helpers.py (1 line):
- line 112: # TODO: flesh out docstrings
archived/inference_pipeline_custom_containers/containers/preprocessor/docker/code/predictor.py (1 line):
- line 124: # TODO: use custom flag to indicate that this is in a pipeline rather than relying on the '*/*'
build_and_train_models/sm-heterogeneous_clusters_for_model_training/code/train_dnn.py (1 line):
- line 135: # TODO: exit the loop through the iterator stopping by itself
build_and_train_models/sm-distributed_model_parallel_v2/shared-scripts/data/pipelines/data_pipeline.py (1 line):
- line 73: # TODO: set sampler.epoch to correctly shuffle across epochs, else same order will be used for
archived/smp-train-gptj-sharded-data-parallel-tp/train.py (1 line):
- line 488: # TODO if this happens, we should try num_workers>1 in dataloader # pylint: disable=fixme
archived/smp-train-gpt-neox-sharded-data-parallel/train.py (1 line):
- line 488: # TODO if this happens, we should try num_workers>1 in dataloader # pylint: disable=fixme
archived/falcon/data_pipeline.py (1 line):
- line 169: # TODO: set sampler.epoch to correctly shuffle across epochs, else same order will be used for all epochs