src/diffusers/models/attention_processor.py (26 lines):
- line 302: # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
- line 353: # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
- line 496: # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
- line 526: # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
- line 736: # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
- line 739: # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
- line 1403: # TODO: add support for attn.scale when we move to Torch 2.1
- line 2061: # TODO: add support for attn.scale when we move to Torch 2.1
- line 2812: # TODO: add support for attn.scale when we move to Torch 2.1
- line 3222: # TODO: add support for attn.scale when we move to Torch 2.1
- line 3316: # TODO: add support for attn.scale when we move to Torch 2.1
- line 3413: # TODO: add support for attn.scale when we move to Torch 2.1
- line 3620: # TODO: add support for attn.scale when we move to Torch 2.1
- line 3750: # TODO: add support for attn.scale when we move to Torch 2.1
- line 3848: # TODO: add support for attn.scale when we move to Torch 2.1
- line 3951: # TODO: add support for attn.scale when we move to Torch 2.1
- line 4056: # TODO: add support for attn.scale when we move to Torch 2.1
- line 4180: # TODO: add support for attn.scale when we move to Torch 2.1
- line 4309: # TODO: add support for attn.scale when we move to Torch 2.1
- line 4400: # TODO: add support for attn.scale when we move to Torch 2.1
- line 4632: # TODO: add support for attn.scale when we move to Torch 2.1
- line 5174: # TODO: add support for attn.scale when we move to Torch 2.1
- line 5239: # TODO: add support for attn.scale when we move to Torch 2.1
- line 5266: # TODO: add support for attn.scale when we move to Torch 2.1
- line 5753: # TODO: add support for attn.scale when we move to Torch 2.1
- line 5854: # TODO: add support for attn.scale when we move to Torch 2.1

src/diffusers/loaders/lora_pipeline.py (6 lines):
- line 334: # TODO: replace it with a method from `state_dict_utils`
- line 819: # TODO: replace it with a method from `state_dict_utils`
- line 2029: # TODO (sayakpaul): to a follow-up to clean and try to unify the conditions.
- line 2687: # TODO: consider if this layer needs to be a quantized layer as well if `is_quantized` is True.
- line 2720: # TODO: this could lead to more memory overhead if the number of overwritten params
- line 2759: # TODO (sayakpaul): Handle the cases when we actually need to expand when using quantization.

scripts/convert_if.py (5 lines):
- line 282: "upcast_attention": False, # TODO: guessing
- line 368: # TODO need better check than i in [4, 8, 12, 16]
- line 677: # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?)
- line 848: "upcast_attention": False, # TODO: guessing
- line 932: # TODO need better check than i in [4, 8, 12, 16]
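Most of the attention_processor.py items are the same note repeated: the processors only dispatch to torch.nn.functional.scaled_dot_product_attention when `scale_qk` leaves the default softmax scaling in place, because the explicit `scale` argument only exists from PyTorch 2.1 onward. A minimal sketch of what resolving the "add support for attn.scale" TODOs could look like once torch >= 2.1 is the minimum version (the tensor shapes and the custom scale value below are assumptions for illustration):

```python
import torch
import torch.nn.functional as F

# Hypothetical attention inputs: (batch, heads, seq_len, head_dim).
query = torch.randn(2, 8, 64, 40)
key = torch.randn(2, 8, 64, 40)
value = torch.randn(2, 8, 64, 40)

custom_scale = 0.5  # stand-in for a non-default `attn.scale`

# Today the fused path is only safe with the default 1/sqrt(head_dim) scaling,
# which is what the `scale_qk` checks flagged above guard against.
out_default = F.scaled_dot_product_attention(query, key, value)

# With torch >= 2.1 the custom scale can be forwarded directly, which is what
# the "add support for attn.scale when we move to Torch 2.1" comments point at.
out_custom = F.scaled_dot_product_attention(query, key, value, scale=custom_scale)
```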
- line 848: "upcast_attention": False, # TODO: guessing - line 932: # TODO need better check than i in [4, 8, 12, 16] src/diffusers/schedulers/scheduling_euler_discrete.py (5 lines): - line 244: # TODO: Support the full EDM scalings for all prediction types and timestep types - line 442: # TODO: Support the full EDM scalings for all prediction types and timestep types - line 480: # TODO: Add this logic to the other schedulers - line 506: # TODO: Add this logic to the other schedulers - line 529: # TODO: Add this logic to the other schedulers src/diffusers/pipelines/unidiffuser/modeling_uvit.py (4 lines): - line 297: # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly - line 492: # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly - line 565: use_linear_projection (int, *optional*): TODO: Not used - line 868: use_linear_projection (int, *optional*): TODO: Not used src/diffusers/models/unets/unet_2d_blocks.py (4 lines): - line 997: # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. - line 1962: # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. - line 3211: # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. - line 3540: # TODO: mark emb as non-optional (self.norm2 requires it). src/diffusers/pipelines/wan/pipeline_wan_vace.py (4 lines): - line 495: # TODO: support this - line 509: # TODO: support this - line 559: # TODO: support this - line 572: # TODO: support this src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py (3 lines): - line 100: # TODO: support for moving submodules for components with enable_model_cpu_offload - line 150: # TODO: handle safety checking? - line 1359: # 7. Prepare extra step kwargs. 
src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py (3 lines):
- line 385: # TODO: Add this logic to the other schedulers
- line 411: # TODO: Add this logic to the other schedulers
- line 435: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_flow_match_euler_discrete.py (3 lines):
- line 479: # TODO: Add this logic to the other schedulers
- line 505: # TODO: Add this logic to the other schedulers
- line 529: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_heun_discrete.py (3 lines):
- line 384: # TODO: Add this logic to the other schedulers
- line 410: # TODO: Add this logic to the other schedulers
- line 434: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py (3 lines):
- line 478: # TODO: Add this logic to the other schedulers
- line 504: # TODO: Add this logic to the other schedulers
- line 528: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py (3 lines):
- line 406: # TODO: Add this logic to the other schedulers
- line 432: # TODO: Add this logic to the other schedulers
- line 456: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_sasolver.py (3 lines):
- line 416: # TODO: Add this logic to the other schedulers
- line 442: # TODO: Add this logic to the other schedulers
- line 466: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_deis_multistep.py (3 lines):
- line 387: # TODO: Add this logic to the other schedulers
- line 413: # TODO: Add this logic to the other schedulers
- line 437: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_dpmsolver_multistep.py (3 lines):
- line 527: # TODO: Add this logic to the other schedulers
- line 566: # TODO: Add this logic to the other schedulers
- line 590: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py (3 lines):
- line 372: # TODO: Add this logic to the other schedulers
- line 398: # TODO: Add this logic to the other schedulers
- line 422: # TODO: Add this logic to the other schedulers

src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py (3 lines):
- line 60: ... ) # TODO update model path
- line 804: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- line 906: # 12. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/schedulers/scheduling_unipc_multistep.py (3 lines):
- line 497: # TODO: Add this logic to the other schedulers
- line 523: # TODO: Add this logic to the other schedulers
- line 547: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_flow_match_lcm.py (3 lines):
- line 479: # TODO: Add this logic to the other schedulers
- line 505: # TODO: Add this logic to the other schedulers
- line 529: # TODO: Add this logic to the other schedulers

src/diffusers/schedulers/scheduling_karras_ve_flax.py (2 lines):
- line 149: TODO Args:
- line 212: Correct the predicted sample based on the output model_output of the network. TODO complete description

src/diffusers/schedulers/scheduling_lms_discrete.py (2 lines):
- line 388: # TODO: Add this logic to the other schedulers
- line 412: # TODO: Add this logic to the other schedulers

src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py (2 lines):
- line 880: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- line 907: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py (2 lines):
- line 1002: # TODO: Check inputs
- line 1110: # TODO: fix image encoding

src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py (2 lines):
- line 193: # TODO: feature_extractor is required to encode initial images (if they are in PIL format),
- line 583: # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/schedulers/scheduling_dpmsolver_sde.py (2 lines):
- line 473: # TODO: Add this logic to the other schedulers
- line 497: # TODO: Add this logic to the other schedulers

src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py (2 lines):
- line 806: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- line 822: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py (2 lines):
- line 1159: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
- line 2428: # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.

src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py (2 lines):
- line 1046: # TODO: Consider smoothing mask guidance map
- line 1491: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/model_loading_utils.py (2 lines):
- line 163: # TODO: maybe refactor a bit this part where we pass a dict here
- line 241: # TODO: revisit cases when param.dtype == torch.float8_e4m3fn

src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py (2 lines):
- line 204: # TODO: add support for attn.scale when we move to Torch 2.1
- line 1205: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py (2 lines):
- line 73: # TODO: feature_extractor is required to encode images (if they are in PIL format),
- line 386: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/latte/pipeline_latte.py (2 lines):
- line 787: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- line 804: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py (2 lines):
- line 655: # TODO: remove below line
- line 927: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
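Several entries here and further down repeat the comment "# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can". The point is that when a plain Python number reaches the model, the forward pass has to wrap it into a tensor on the model's device, which costs a host-to-device transfer; handing over a tensor that already lives on the right device avoids that. A minimal sketch of the two call patterns (the device choice and shapes are illustrative assumptions):

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
batch = 4

# Pattern the TODO warns about: a Python int forces tensor creation and a
# host-to-device copy inside the model's forward pass.
t_scalar = 999
t_wrapped = torch.tensor([t_scalar], dtype=torch.long, device=device).expand(batch)

# Preferred pattern: keep the timestep as a device tensor from the start,
# e.g. by indexing the scheduler's timesteps without calling .item().
timesteps = torch.linspace(999, 0, 50, device=device).long()
t_tensor = timesteps[0].expand(batch)  # already on `device`, no extra copy
```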
src/diffusers/models/normalization.py (2 lines):
- line 477: # TODO: replace with torch layernorm once min required torch version >= 2.1
- line 571: # TODO: (Dhruv) This can be replaced with regular RMSNorm in Mochi once `_keep_in_fp32_modules` is supported

src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py (2 lines):
- line 790: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- line 807: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/pipeline_flax_utils.py (2 lines):
- line 158: # TODO: handle inference_state
- line 598: # TODO: make it compatible with jax.lax

src/diffusers/pipelines/dit/pipeline_dit.py (1 line):
- line 187: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/pag/pipeline_pag_sd.py (1 line):
- line 972: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py (1 line):
- line 1522: # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py (1 line):
- line 994: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/consisid/pipeline_consisid.py (1 line):
- line 874: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/pag/pipeline_pag_hunyuandit.py (1 line):
- line 810: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py (1 line):
- line 1380: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/allegro/pipeline_allegro.py (1 line):
- line 883: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/audioldm2/modeling_audioldm2.py (1 line):
- line 763: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py (1 line):
- line 632: # TODO: clean up later

src/diffusers/loaders/single_file_utils.py (1 line):
- line 70: # TODO: find non-Diffusers keys for controlnet_xl

src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py (1 line):
- line 293: pad_end=False, # TODO check why its off by 1 here when True

src/diffusers/models/autoencoders/autoencoder_kl_magvit.py (1 line):
- line 106: mode="replicate", # TODO: check if this is necessary

src/diffusers/models/unets/unet_motion_model.py (1 line):
- line 2016: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/models/controlnets/controlnet_union.py (1 line):
- line 681: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/hooks/group_offloading.py (1 line):
- line 250: # TODO: we can potentially optimize this code path by checking if the _all_ the desired

src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py (1 line):
- line 887: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
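The first normalization.py item ("replace with torch layernorm once min required torch version >= 2.1") most likely refers to torch.nn.LayerNorm only accepting a `bias` flag from PyTorch 2.1 onward; that reading is an assumption on my part, but if it holds, the hand-rolled layer norm could eventually be swapped for the built-in one along these lines:

```python
import torch
import torch.nn as nn

hidden_states = torch.randn(2, 16, 320)

# torch >= 2.1: nn.LayerNorm can be built without a bias term directly,
# which (assuming bias support is the blocker) would let the custom
# LayerNorm in normalization.py be retired.
norm = nn.LayerNorm(320, eps=1e-6, elementwise_affine=True, bias=False)

out = norm(hidden_states)
print(out.shape, norm.bias)  # torch.Size([2, 16, 320]) None
```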
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py (1 line):
- line 1070: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/schedulers/scheduling_lcm.py (1 line):
- line 451: # TODO: also reset self.num_inference_steps?

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py (1 line):
- line 747: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py (1 line):
- line 754: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py (1 line):
- line 1203: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py (1 line):
- line 1046: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/unets/unet_3d_condition.py (1 line):
- line 617: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/animatediff/pipeline_animatediff.py (1 line):
- line 794: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/quantizers/quantization_config.py (1 line):
- line 425: # TODO: (Dhruv) Add this as an init argument when we can support loading unquantized checkpoints.

src/diffusers/pipelines/amused/pipeline_amused_img2img.py (1 line):
- line 71: # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before

scripts/convert_kakao_brain_unclip_to_diffusers.py (1 line):
- line 886: # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?)

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py (1 line):
- line 408: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py (1 line):
- line 982: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_controlnet.py (1 line):
- line 1224: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py (1 line):
- line 1129: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py (1 line):
- line 1162: # 6.1 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/unets/unet_i2vgen_xl.py (1 line):
- line 566: # TODO: this requires sync between CPU and GPU. So try to pass `timesteps` as tensors if you can

src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py (1 line):
- line 905: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py (1 line):
- line 1129: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py (1 line):
- line 845: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/pag/pipeline_pag_sana.py (1 line):
- line 857: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/schedulers/scheduling_sde_ve.py (1 line):
- line 220: # TODO is the variable diffusion the correct scaling term for the noise?

src/diffusers/pipelines/sana/pipeline_sana_sprint.py (1 line):
- line 799: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py (1 line):
- line 785: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

scripts/convert_shap_e_to_diffusers.py (1 line):
- line 897: # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?)

src/diffusers/pipelines/easyanimate/pipeline_easyanimate.py (1 line):
- line 690: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py (1 line):
- line 510: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/transformers/transformer_temporal.py (1 line):
- line 275: # TODO: should use out_channels for continuous projections

src/diffusers/models/controlnets/controlnet_sparsectrl.py (1 line):
- line 667: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py (1 line):
- line 790: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/unets/unet_2d_condition.py (1 line):
- line 913: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py (1 line):
- line 1370: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py (1 line):
- line 821: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py (1 line):
- line 786: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py (1 line):
- line 964: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py (1 line):
- line 774: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py (1 line):
- line 1026: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py (1 line):
- line 936: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py (1 line):
- line 917: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
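By far the most frequent item in this report is "# N. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline". That step inspects the scheduler's `step()` signature to decide whether `eta` and `generator` may be forwarded, and the TODO asks for the logic to live in one shared place instead of being copied into every `__call__`. A sketch of what a free-standing helper could look like (the function name and its placement are assumptions, not an existing diffusers utility):

```python
import inspect


def prepare_extra_step_kwargs(scheduler, generator=None, eta=0.0):
    """Collect the optional kwargs that this scheduler's `step()` accepts.

    `eta` is only meaningful for DDIM-style schedulers and `generator` is not
    accepted by every scheduler, so both are filtered by signature inspection.
    """
    step_params = set(inspect.signature(scheduler.step).parameters)

    extra_step_kwargs = {}
    if "eta" in step_params:
        extra_step_kwargs["eta"] = eta
    if "generator" in step_params:
        extra_step_kwargs["generator"] = generator
    return extra_step_kwargs


# Hypothetical usage inside a pipeline's denoising loop:
#   extra = prepare_extra_step_kwargs(self.scheduler, generator, eta)
#   latents = self.scheduler.step(noise_pred, t, latents, **extra).prev_sample
```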
src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py (1 line):
- line 800: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/utils/export_utils.py (1 line):
- line 167: # TODO: Dhruv. Remove by Diffusers release 0.33.0

src/diffusers/loaders/lora_conversion_utils.py (1 line):
- line 548: # TODO: alphas.

src/diffusers/pipelines/pag/pipeline_pag_kolors.py (1 line):
- line 936: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py (1 line):
- line 1386: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/kolors/pipeline_kolors.py (1 line):
- line 908: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py (1 line):
- line 1522: # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py (1 line):
- line 253: # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`

src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py (1 line):
- line 939: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/controlnets/controlnet.py (1 line):
- line 734: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py (1 line):
- line 1237: # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/transformers/hunyuan_transformer_2d.py (1 line):
- line 196: # FFN Layer ### TODO: switch norm2 and norm3 in the state dict

src/diffusers/image_processor.py (1 line):
- line 1050: # depth = [self.convert_to_depth(i) for i in depth] #TODO define convert_to_depth

src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py (1 line):
- line 844: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py (1 line):
- line 721: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py (1 line):
- line 998: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py (1 line):
- line 145: # TODO: should use generator here? int analogue of randn_tensor is not exposed in ...utils

src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py (1 line):
- line 1567: # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py (1 line):
- line 788: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/schedulers/scheduling_sde_ve_flax.py (1 line):
- line 219: # TODO is the variable diffusion the correct scaling term for the noise?

scripts/extract_lora_from_model.py (1 line):
- line 7: for example. (TODO: more reason to add `AutoModel`).
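The pipeline_consistency_models.py item asks whether the random integer draw should honour the pipeline's `generator`, noting that no integer counterpart of `randn_tensor` is exposed in the utils. torch.randint already accepts a generator, so a reproducible draw could look like the sketch below (the range, shape, and device are placeholder values):

```python
import torch

generator = torch.Generator(device="cpu").manual_seed(0)

# Reproducible integer sampling driven by the same `generator` the pipeline
# already threads through its randn_tensor-style helpers. Purely illustrative;
# this is not an existing diffusers utility.
indices = torch.randint(0, 40, (4,), generator=generator, device="cpu")
print(indices)
```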
src/diffusers/pipelines/pag/pipeline_pag_sd_img2img.py (1 line):
- line 1006: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py (1 line):
- line 683: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

utils/check_repo.py (1 line):
- line 90: "TFRobertaForMultipleChoice", # TODO: fix

src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py (1 line):
- line 601: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/utils/peft_utils.py (1 line):
- line 60: # TODO can be removed once that PEFT version is no longer supported.

src/diffusers/pipelines/amused/pipeline_amused_inpaint.py (1 line):
- line 79: # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before

src/diffusers/pipelines/sana/pipeline_sana.py (1 line):
- line 922: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py (1 line):
- line 653: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py (1 line):
- line 966: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py (1 line):
- line 1239: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py (1 line):
- line 778: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py (1 line):
- line 261: # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`

src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py (1 line):
- line 639: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/utils/dynamic_modules_utils.py (1 line):
- line 357: # TODO: we will get this info in the etag soon, so retrieve it from there and not here.

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py (1 line):
- line 858: # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

scripts/convert_kandinsky_to_diffusers.py (1 line):
- line 837: # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?)

src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py (1 line):
- line 206: # Batched video input is not yet tested/supported. TODO: take a look later

src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py (1 line):
- line 754: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

scripts/convert_svd_to_diffusers.py (1 line):
- line 382: # TODO resnet time_mixer.mix_factor

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py (1 line):
- line 835: # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py (1 line):
- line 1460: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py (1 line):
- line 951: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py (1 line):
- line 1473: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py (1 line):
- line 836: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/deepfloyd_if/pipeline_if.py (1 line):
- line 700: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/easyanimate/pipeline_easyanimate_control.py (1 line):
- line 914: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py (1 line):
- line 918: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py (1 line):
- line 181: # TODO: add support for attn.scale when we move to Torch 2.1

src/diffusers/pipelines/pia/pipeline_pia.py (1 line):
- line 883: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/experimental/rl/value_guided_sampling.py (1 line):
- line 114: # TODO: verify deprecation of this kwarg

src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py (1 line):
- line 985: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/controlnets/controlnet_xs.py (1 line):
- line 1084: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py (1 line):
- line 236: # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`

src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py (1 line):
- line 1143: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/sana/pipeline_sana_controlnet.py (1 line):
- line 1005: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py (1 line):
- line 1411: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/transformers/sana_transformer.py (1 line):
- line 170: # TODO: add support for attn.scale when we move to Torch 2.1

src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py (1 line):
- line 693: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py (1 line):
- line 1667: # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py (1 line):
- line 915: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
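The Flax pipelines in this report all carry the note "# TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`". The assumption works because the standard classifier-free guidance combination reduces to the conditioned prediction exactly when the scale is 1.0, so values above 1.0 double as the on/off switch. A small sketch of that rule (PyTorch tensors here purely for illustration, with random stand-ins for the two noise predictions):

```python
import torch

guidance_scale = 7.5
do_classifier_free_guidance = guidance_scale > 1.0  # the assumption flagged above

# Hypothetical noise predictions for the unconditional and text-conditioned passes.
noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_text = torch.randn(1, 4, 64, 64)

if do_classifier_free_guidance:
    # Standard CFG combination; at guidance_scale == 1.0 this is exactly
    # noise_pred_text, which is why > 1.0 is used as the enable condition.
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
else:
    noise_pred = noise_pred_text
```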
src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py (1 line):
- line 1157: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/schedulers/scheduling_tcd.py (1 line):
- line 477: # TODO: also reset self.num_inference_steps?

src/diffusers/configuration_utils.py (1 line):
- line 115: # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,

src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py (1 line):
- line 811: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/easyanimate/pipeline_easyanimate_inpaint.py (1 line):
- line 1138: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py (1 line):
- line 1351: # 7.2 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py (1 line):
- line 291: # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`

src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py (1 line):
- line 868: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py (1 line):
- line 1009: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/models/unets/unet_spatio_temporal_condition.py (1 line):
- line 398: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can

src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py (1 line):
- line 585: # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py (1 line):
- line 1374: # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline

src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_framepack.py (1 line):
- line 857: # TODO: find a more generic way in future if there are more checkpoints

src/diffusers/pipelines/mochi/pipeline_mochi.py (1 line):
- line 188: # TODO: determine these scaling factors from model parameters

src/diffusers/pipelines/lumina/pipeline_lumina.py (1 line):
- line 827: # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can