def __post_init__()

in ultravox/training/config_base.py [0:0]


    def __post_init__(self):
        assert self.data_type in ["bfloat16", "float16", "float32"]

        # Fall back to MPS (Apple Silicon) or CPU if CUDA was requested but is unavailable.
        if self.device == "cuda" and not torch.cuda.is_available():
            self.device = "mps" if torch.backends.mps.is_available() else "cpu"
        if self.device != "cuda":
            # Off CUDA, prefer float32 and a standard torch optimizer (bitsandbytes requires CUDA).
            if self.data_type == "bfloat16":
                self.data_type = "float32"
            if self.optimizer == "adamw_bnb_8bit":
                logging.warning(
                    "adamw_bnb_8bit is not supported on non-CUDA devices. Switching to adamw_torch"
                )
                self.optimizer = "adamw_torch"

        # Default to a timestamped experiment name and a matching output directory under runs/.
        if self.exp_name is None:
            self.exp_name = datetime.datetime.now().strftime("exp--%Y-%m-%d--%H-%M-%S")
        if self.output_dir is None:
            self.output_dir = Path("runs") / self.exp_name

        if self.resume_from_load_dir:
            assert bool(
                self.model_load_dir
            ), "model_load_dir must be set if resume_from_load_dir is True"

        # HF Pipeline gets tripped up if the path has a "." in it
        # (e.g. "runs/v0.1" becomes "runs/v0--1").
        self.output_dir = Path(str(self.output_dir).replace(".", "--"))

        if self.logs_dir is None:
            self.logs_dir = self.output_dir / "logs"

        # When the audio encoder is trainable (LoRA r > 0) and we're running under DDP
        # (WORLD_SIZE is set), LayerDrop has to be turned off.
        if (
            self.audio_model_lora_config is not None
            and self.audio_model_lora_config.r > 0
            and os.environ.get("WORLD_SIZE", None) is not None
            and self.disable_layerdrop is False
        ):
            logging.warning(
                "LayerDrop cannot be used in DDP when encoder is not frozen. Disabling LayerDrop."
            )
            self.disable_layerdrop = True

        if self.use_fsdp and self.do_eval:
            logging.warning(
                "FSDP is enabled: Evaluation is not supported with FSDP. Disabling evaluation."
            )
            self.do_eval = False
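
Since `__post_init__` belongs to a dataclass, this normalization runs automatically when the config object is constructed. The sketch below illustrates the same construct-then-normalize pattern in isolation; `MiniConfig` and its defaults are hypothetical stand-ins for illustration, not the actual ultravox config class.

    import dataclasses
    import logging

    import torch


    @dataclasses.dataclass
    class MiniConfig:
        # Hypothetical stand-in: field names mirror the excerpt above, defaults are assumptions.
        device: str = "cuda"
        data_type: str = "bfloat16"
        optimizer: str = "adamw_bnb_8bit"

        def __post_init__(self):
            # Same fallback chain as the excerpt: CUDA -> MPS -> CPU.
            if self.device == "cuda" and not torch.cuda.is_available():
                self.device = "mps" if torch.backends.mps.is_available() else "cpu"
            if self.device != "cuda":
                if self.data_type == "bfloat16":
                    self.data_type = "float32"
                if self.optimizer == "adamw_bnb_8bit":
                    logging.warning("adamw_bnb_8bit needs CUDA; using adamw_torch")
                    self.optimizer = "adamw_torch"


    # On a CUDA-less machine this prints: cpu float32 adamw_torch
    config = MiniConfig()
    print(config.device, config.data_type, config.optimizer)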