in threestudio/models/guidance/zero123_guidance.py [0:0]
def load_model_from_config(config, ckpt, device, vram_O=True, verbose=False):
    """Instantiate a model from a config and load checkpoint weights onto ``device``.

    Args:
        config: config object whose ``model`` attribute is accepted by
            ``instantiate_from_config``.
        ckpt: path to a ``torch.save``-style checkpoint containing a
            ``"state_dict"`` entry (and optionally ``"global_step"``).
        device: target device the model is moved to before returning.
        vram_O: if True, delete the first-stage decoder to save GPU memory
            (this guidance path does not use it).
        verbose: if True, print loading diagnostics (global step,
            missing/unexpected keys, EMA loading).

    Returns:
        The loaded model in ``eval()`` mode on ``device``.
    """
    # NOTE(security): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd and verbose:
        print(f'[INFO] Global Step: {pl_sd["global_step"]}')
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    # strict=False tolerates partial checkpoints; mismatches are reported below.
    m, u = model.load_state_dict(sd, strict=False)
    if m and verbose:
        print("[INFO] missing keys: \n", m)
    if u and verbose:
        print("[INFO] unexpected keys: \n", u)
    # Manually fold the EMA weights into the live model, then drop the EMA
    # copy to save GPU memory.
    if model.use_ema:
        if verbose:
            print("[INFO] loading EMA...")
        model.model_ema.copy_to(model.model)
        del model.model_ema
    if vram_O:
        # we don't need the decoder
        del model.first_stage_model.decoder
    torch.cuda.empty_cache()
    model.eval().to(device)
    return model