def model_from_config()

in shap_e/models/configs.py


from typing import Any, Dict, Union

import blobfile as bf
import torch
import torch.nn as nn
import yaml

# The model classes and batch_meta_state_dict referenced below are imported
# from the other modules in this package.


def model_from_config(config: Union[str, Dict[str, Any]], device: torch.device) -> nn.Module:
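    """
    Construct an nn.Module from a config.

    The config is either a path to a YAML file (opened with blobfile, so
    local paths and supported remote URLs both work) or an already-parsed
    dict. The dict's "name" key selects the model class; the remaining keys
    are forwarded to that class's constructor. Composite models recurse into
    their sub-model configs.
    """
    # A string config is a path to a YAML file: parse it and recurse.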
    if isinstance(config, str):
        with bf.BlobFile(config, "rb") as f:
            obj = yaml.load(f, Loader=yaml.SafeLoader)
        return model_from_config(obj, device=device)

    # Work on a copy so the caller's dict is not mutated; "name" selects
    # which model class to build.
    config = config.copy()
    name = config.pop("name")

    if name == "PointCloudTransformerEncoder":
        return PointCloudTransformerEncoder(device=device, dtype=torch.float32, **config)
    elif name == "PointCloudPerceiverEncoder":
        return PointCloudPerceiverEncoder(device=device, dtype=torch.float32, **config)
    elif name == "PointCloudTransformerChannelsEncoder":
        return PointCloudTransformerChannelsEncoder(device=device, dtype=torch.float32, **config)
    elif name == "PointCloudPerceiverChannelsEncoder":
        return PointCloudPerceiverChannelsEncoder(device=device, dtype=torch.float32, **config)
    elif name == "MultiviewTransformerEncoder":
        return MultiviewTransformerEncoder(device=device, dtype=torch.float32, **config)
    elif name == "Transmitter":
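        # The encoder is configured with the shapes of the renderer's
        # parameters, computed from a batch-size-1 meta state dict with the
        # leading batch dimension dropped.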
        renderer = model_from_config(config.pop("renderer"), device=device)
        param_shapes = {
            k: v.shape[1:] for k, v in batch_meta_state_dict(renderer, batch_size=1).items()
        }
        encoder_config = config.pop("encoder").copy()
        encoder_config["param_shapes"] = param_shapes
        encoder = model_from_config(encoder_config, device=device)
        return Transmitter(encoder=encoder, renderer=renderer, **config)
    elif name == "VectorDecoder":
        renderer = model_from_config(config.pop("renderer"), device=device)
        param_shapes = {
            k: v.shape[1:] for k, v in batch_meta_state_dict(renderer, batch_size=1).items()
        }
        return VectorDecoder(param_shapes=param_shapes, renderer=renderer, device=device, **config)
    elif name == "ChannelsDecoder":
        renderer = model_from_config(config.pop("renderer"), device=device)
        param_shapes = {
            k: v.shape[1:] for k, v in batch_meta_state_dict(renderer, batch_size=1).items()
        }
        return ChannelsDecoder(
            param_shapes=param_shapes, renderer=renderer, device=device, **config
        )
    elif name == "OneStepNeRFRenderer":
        config = config.copy()
        for field in [
            # Required
            "void_model",
            "foreground_model",
            "volume",
            # Optional to use NeRF++
            "background_model",
            "outer_volume",
        ]:
            if field in config:
                config[field] = model_from_config(config.pop(field).copy(), device)
        return OneStepNeRFRenderer(device=device, **config)
    elif name == "TwoStepNeRFRenderer":
        config = config.copy()
        for field in [
            # Required
            "void_model",
            "coarse_model",
            "fine_model",
            "volume",
            # Optional to use NeRF++
            "coarse_background_model",
            "fine_background_model",
            "outer_volume",
        ]:
            if field in config:
                config[field] = model_from_config(config.pop(field).copy(), device)
        return TwoStepNeRFRenderer(device=device, **config)
    elif name == "PooledMLP":
        return PooledMLP(device, **config)
    elif name == "PointDiffusionTransformer":
        return PointDiffusionTransformer(device=device, dtype=torch.float32, **config)
    elif name == "PointDiffusionPerceiver":
        return PointDiffusionPerceiver(device=device, dtype=torch.float32, **config)
    elif name == "CLIPImagePointDiffusionTransformer":
        return CLIPImagePointDiffusionTransformer(device=device, dtype=torch.float32, **config)
    elif name == "CLIPImageGridPointDiffusionTransformer":
        return CLIPImageGridPointDiffusionTransformer(device=device, dtype=torch.float32, **config)
    elif name == "UpsamplePointDiffusionTransformer":
        return UpsamplePointDiffusionTransformer(device=device, dtype=torch.float32, **config)
    elif name == "CLIPImageGridUpsamplePointDiffusionTransformer":
        return CLIPImageGridUpsamplePointDiffusionTransformer(
            device=device, dtype=torch.float32, **config
        )
    elif name == "SplitVectorDiffusion":
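        # The flat d_latent vector is treated as latent_ctx tokens with
        # d_latent // latent_ctx channels each; the inner model emits twice
        # that many output channels.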
        inner_config = config.pop("inner")
        d_latent = config.pop("d_latent")
        latent_ctx = config.pop("latent_ctx", 1)
        inner_config["input_channels"] = d_latent // latent_ctx
        inner_config["n_ctx"] = latent_ctx
        inner_config["output_channels"] = d_latent // latent_ctx * 2
        inner_model = model_from_config(inner_config, device)
        return SplitVectorDiffusion(
            device=device, wrapped=inner_model, n_ctx=latent_ctx, d_latent=d_latent
        )
    elif name == "STFRenderer":
        config = config.copy()
        for field in ["sdf", "tf", "volume"]:
            config[field] = model_from_config(config.pop(field), device)
        return STFRenderer(device=device, **config)
    elif name == "NeRSTFRenderer":
        config = config.copy()
        for field in ["sdf", "tf", "nerstf", "void", "volume"]:
            if field not in config:
                continue
            config[field] = model_from_config(config.pop(field), device)
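        # sdf, tf, and nerstf are optional sub-models; any absent from the
        # config are passed to the constructor as None.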
        config.setdefault("sdf", None)
        config.setdefault("tf", None)
        config.setdefault("nerstf", None)
        return NeRSTFRenderer(device=device, **config)

    # Simple leaf models: look the class up by name and construct it directly.
    model_cls = {
        "MLPSDFModel": MLPSDFModel,
        "MLPTextureFieldModel": MLPTextureFieldModel,
        "MLPNeRFModel": MLPNeRFModel,
        "MLPDensitySDFModel": MLPDensitySDFModel,
        "MLPNeRSTFModel": MLPNeRSTFModel,
        "VoidNeRFModel": VoidNeRFModel,
        "BoundingBoxVolume": BoundingBoxVolume,
        "SphericalVolume": SphericalVolume,
        "UnboundedVolume": UnboundedVolume,
    }[name]
    return model_cls(device=device, **config)
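
A minimal usage sketch. The YAML path below is hypothetical; any config that
parses to a dict with a "name" key plus kwargs matching the chosen class's
constructor works, and blobfile accepts both local paths and supported
remote URLs:

import torch

from shap_e.models.configs import model_from_config

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hypothetical config path; the file must parse to a dict of the form
# {"name": <model class>, <constructor kwargs>...}.
model = model_from_config("example_transmitter.yaml", device=device)
print(f"built {type(model).__name__} with "
      f"{sum(p.numel() for p in model.parameters())} parameters")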