in src/accelerate/utils/dataclasses.py
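
# NOTE: this excerpt relies on module-level imports in dataclasses.py:
# `os`, `warnings`, and `str_to_bool` (from accelerate's environment utilities).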
def __post_init__(self):
    from .deepspeed import HfDeepSpeedConfig
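
    # Fields left as None are resolved from the ACCELERATE_* environment
    # variables that `accelerate config` / `accelerate launch` export; the
    # sentinel string "auto" defers the final value to DeepSpeed itself.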
    if self.gradient_accumulation_steps is None:
        gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
        self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas

    if self.gradient_clipping is None:
        gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "auto")
        self.gradient_clipping = gradient_clipping if gradient_clipping == "auto" else float(gradient_clipping)

    if self.zero_stage is None:
        self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))

    if self.offload_optimizer_device is None:
        self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")

    if self.offload_param_device is None:
        self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")

    if self.offload_optimizer_nvme_path is None:
        self.offload_optimizer_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none")

    if self.offload_param_nvme_path is None:
        self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")

    if self.zero3_save_16bit_model is None:
        self.zero3_save_16bit_model = os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
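
    # MS-AMP (FP8 training) is toggled through the shared ACCELERATE_FP8_*
    # variables rather than DeepSpeed-specific ones.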
    if self.enable_msamp is None:
        self.enable_msamp = os.environ.get("ACCELERATE_FP8_BACKEND", None) == "MSAMP"

    if self.msamp_opt_level is None:
        self.msamp_opt_level = os.environ.get("ACCELERATE_FP8_OPT_LEVEL", "O1")

    if self.hf_ds_config is None:
        self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
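
    # Two paths from here: honor a user-supplied DeepSpeed config (a dict, a
    # path to a JSON file, or an existing HfDeepSpeedConfig), or synthesize a
    # config from the plugin's own fields.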
    if (
        isinstance(self.hf_ds_config, dict)
        or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none")
        or isinstance(self.hf_ds_config, HfDeepSpeedConfig)
    ):
        if not isinstance(self.hf_ds_config, HfDeepSpeedConfig):
            self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)
        if "gradient_accumulation_steps" not in self.hf_ds_config.config:
            self.hf_ds_config.config["gradient_accumulation_steps"] = 1
        if "zero_optimization" not in self.hf_ds_config.config:
            raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
        self._deepspeed_config_checks()
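
        # Maps each plugin attribute to its dotted key path inside the
        # DeepSpeed config.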
        plugin_to_config_mapping = {
            "gradient_accumulation_steps": "gradient_accumulation_steps",
            "gradient_clipping": "gradient_clipping",
            "zero_stage": "zero_optimization.stage",
            "offload_optimizer_device": "zero_optimization.offload_optimizer.device",
            "offload_param_device": "zero_optimization.offload_param.device",
            "offload_param_nvme_path": "zero_optimization.offload_param.nvme_path",
            "offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path",
            "zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save",
        }
        kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None}
        # Fill any "auto" entries in the config from explicitly-set plugin
        # values; `must_match=False` leaves values already pinned in the file
        # untouched instead of raising on a mismatch.
        for key in kwargs:
            self.fill_match(key, **kwargs, must_match=False)
        self.hf_ds_config.set_stage_and_offload()

        # Conversely, fill the missing plugin attributes from the DeepSpeed
        # config when a config file is used.
        for key, value in plugin_to_config_mapping.items():
            config_value = self.hf_ds_config.get_value(value)
            if config_value is not None and config_value != "auto":
                setattr(self, key, config_value)
    else:
        config = {
            "train_batch_size": "auto",
            "train_micro_batch_size_per_gpu": "auto",
            "gradient_accumulation_steps": self.gradient_accumulation_steps,
            "zero_optimization": {
                "stage": self.zero_stage,
                "offload_optimizer": {
                    "device": self.offload_optimizer_device,
                    "nvme_path": self.offload_optimizer_nvme_path if self.offload_optimizer_device == "nvme" else None,
                },
                "offload_param": {
                    "device": self.offload_param_device,
                    "nvme_path": self.offload_param_nvme_path if self.offload_param_device == "nvme" else None,
                },
                "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model,
            },
        }
        if self.gradient_clipping:
            config["gradient_clipping"] = self.gradient_clipping
        self.hf_ds_config = HfDeepSpeedConfig(config)

    self.deepspeed_config = self.hf_ds_config.config
    self.deepspeed_config["steps_per_print"] = float("inf")  # effectively disables DeepSpeed's periodic step logging to stdout
    if self.zero3_init_flag is None:
        self.zero3_init_flag = (
            str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
        )
    if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
        warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
        self.zero3_init_flag = False

    # NOTE: Set to False by default; will be set to `True` automatically if this is the first plugin passed
    # to the `Accelerator`'s `deepspeed_plugin` param, *or* if `AcceleratorState().enable_deepspeed_plugin(plugin_key)`
    # is manually called.
    self._set_selected(False)

    # Skip if an `msamp` section is already present in the config.
    if self.enable_msamp and "msamp" not in self.deepspeed_config:
        if self.zero_stage == 3:
            raise NotImplementedError(
                "MS-AMP is not supported for ZeRO Stage 3. Please use ZeRO Stage 0, 1, or 2 instead."
            )
        if self.msamp_opt_level not in ["O1", "O2"]:
            raise ValueError("Invalid optimization level for MS-AMP. Please use one of ['O1' or 'O2'].")
        self.deepspeed_config["msamp"] = {"enabled": True, "opt_level": self.msamp_opt_level}