threestudio/models/guidance/stable_diffusion_bsd_guidance.py (4 lines):
- line 167: # FIXME: hard-coded dims
- line 468: latents = self.get_latents(rgb_BCHW, rgb_as_latents=False) # TODO: with some probability this is du or the ref image
- line 730: # TODO: more general cases
- line 1001: # FIXME: use view-independent or dependent embeddings?

threestudio/models/renderers/nvdiff_rasterizer.py (3 lines):
- line 79: # FIXME: paste texture back to mesh
- line 127: # TODO: make is clear whether to compute this.
- line 143: # TODO: make it clear whether to compute the normal, now we compute it in all cases

threestudio/models/guidance/stable_diffusion_vsd_guidance.py (3 lines):
- line 164: # FIXME: hard-coded dims
- line 410: latents = self.get_latents(rgb_BCHW, rgb_as_latents=False) # TODO: with some probability this is du or the ref image
- line 699: # TODO: more general cases

threestudio/systems/dreamcraft3d.py (3 lines):
- line 180: # FIXME: reverse x axis
- line 211: # FIXME: use mixed camera config
- line 571: # FIXME: save camera extrinsics

threestudio/models/guidance/deep_floyd_guidance.py (3 lines):
- line 25: # FIXME: xformers error
- line 257: noise = torch.randn_like(latents) # TODO: use torch generator
- line 318: # # FIXME: Visualize inpainting results

threestudio/models/networks.py (2 lines):
- line 104: # TODO frame_time only supports batch_size == 1 cases
- line 210: ) # FIXME: hard coded

threestudio/models/guidance/zero123_guidance.py (2 lines):
- line 106: # TODO: seems it cannot load into fp16...
- line 297: noise = torch.randn_like(latents) # TODO: use torch generator

threestudio/models/guidance/controlnet_guidance.py (2 lines):
- line 325: noise = torch.randn_like(latents) # TODO: use torch generator
- line 468: text_embeddings = prompt_utils.get_text_embeddings(temp, azimuth, camera_distance, view_dependent_prompt) # FIXME: change to view-conditioned prompt

threestudio/data/image.py (2 lines):
- line 64: # FIXME:
- line 167: ) # FIXME: hard-coded near and far

threestudio/models/geometry/implicit_volume.py (2 lines):
- line 146: # TODO: use raw density
- line 279: # FIXME: use progressive normal eps

threestudio/models/guidance/stable_diffusion_guidance.py (2 lines):
- line 261: noise = torch.randn_like(latents) # TODO: use torch generator
- line 360: noise = torch.randn_like(latents) # TODO: use torch generator

threestudio/data/images.py (2 lines):
- line 64: # FIXME:
- line 167: ) # FIXME: hard-coded near and far

threestudio/systems/zero123.py (2 lines):
- line 143: # claforte: TODO: rename the loss_terms keys
- line 301: # claforte: TODO: don't hardcode the frame numbers to record... read them from cfg instead.

threestudio/models/exporters/mesh_exporter.py (2 lines):
- line 21: fmt: str = "obj-mtl" # in ['obj-mtl', 'obj'], TODO: fbx
- line 132: # TODO: map_Ks

threestudio/models/guidance/stable_zero123_guidance.py (2 lines):
- line 103: # TODO: seems it cannot load into fp16...
- line 295: noise = torch.randn_like(latents) # TODO: use torch generator

threestudio/data/uncond.py (2 lines):
- line 326: ) # FIXME: hard-coded near and far
- line 430: ) # FIXME: hard-coded near and far

threestudio/models/guidance/stable_diffusion_unified_guidance.py (2 lines):
- line 150: # FIXME: hard-coded output dim
- line 262: # FIXME: pipe.__call__ requires text_encoder.dtype

threestudio/systems/base.py (1 line):
- line 259: ) # TODO: hard-coded relative path

threestudio/models/guidance/controlnet_reg_guidance.py (1 line):
- line 334: noise = torch.randn_like(latents) # TODO: use torch generator

threestudio/models/guidance/zero123_unified_guidance.py (1 line):
- line 138: # FIXME: hard-coded output dim

threestudio/utils/callbacks.py (1 line):
- line 72: | set( # hard code, TODO: use config to exclude folders or files

launch.py (1 line):
- line 248: # FIXME: no effect, stdout is not captured

threestudio/scripts/train_dreambooth_lora.py (1 line):
- line 715: # TODO (sayakpaul): Remove this check when gradient accumulation with two models is enabled in accelerate.

threestudio/scripts/train_dreambooth.py (1 line):
- line 876: # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.

threestudio/models/isosurface.py (1 line):
- line 137: ) # FIXME: hard-coded activation

threestudio/models/prompt_processors/deepfloyd_prompt_processor.py (1 line):
- line 33: ) # FIXME: behavior of auto device map in multi-GPU training

threestudio/models/prompt_processors/base.py (1 line):
- line 223: self._cache_dir = ".threestudio_cache/text_embeddings" # FIXME: hard-coded path
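
The recurring "use torch generator" TODOs in the guidance modules all annotate the same call, `noise = torch.randn_like(latents)`. Since `torch.randn_like` does not accept a `generator` argument, one way to make that noise reproducible is to draw it with `torch.randn` and an explicit `torch.Generator`. The helper below is a minimal sketch under that assumption; the function name and the way the seed is obtained are illustrative, not code from the repository.

```python
import torch

def seeded_randn_like(latents: torch.Tensor, seed: int = 0) -> torch.Tensor:
    # torch.randn_like has no `generator` parameter, so draw the noise with
    # torch.randn on the same device/dtype and pass an explicit generator.
    gen = torch.Generator(device=latents.device).manual_seed(seed)
    return torch.randn(
        latents.shape,
        generator=gen,
        device=latents.device,
        dtype=latents.dtype,
    )
```

A call site such as stable_diffusion_guidance.py line 261 would then read `noise = seeded_randn_like(latents, seed)`, with the seed sourced from wherever the module keeps its configuration; how it is threaded through is left open here.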
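Similarly, the repeated "hard-coded near and far" FIXMEs in threestudio/data/image.py, threestudio/data/images.py, and threestudio/data/uncond.py could be resolved by promoting the two plane distances to dataclass config fields, matching the dataclass-based configuration style these data modules already follow. The sketch below assumes hypothetical field names `near_plane` and `far_plane` and uses a generic OpenGL-style projection matrix for illustration; neither the names nor the matrix convention is taken from the repository.

```python
import math
from dataclasses import dataclass

import torch

@dataclass
class CameraDataConfig:
    # Hypothetical fields replacing the hard-coded near/far literals;
    # the names and defaults here are illustrative.
    near_plane: float = 0.1
    far_plane: float = 1000.0

def perspective_projection(cfg: CameraDataConfig, fovy: float, aspect: float) -> torch.Tensor:
    # Generic OpenGL-style perspective matrix; threestudio's own helper may use a
    # different convention. The point is only that near/far come from the config.
    n, f = cfg.near_plane, cfg.far_plane
    t = 1.0 / math.tan(fovy / 2.0)
    return torch.tensor(
        [
            [t / aspect, 0.0, 0.0, 0.0],
            [0.0, t, 0.0, 0.0],
            [0.0, 0.0, (f + n) / (n - f), 2.0 * f * n / (n - f)],
            [0.0, 0.0, -1.0, 0.0],
        ]
    )
```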