in pytext/config/config_adapter.py
def v1_to_v2(json_config):
    """Migrate a v1 config to v2 by converting the flat scheduler section
    ({"type": "step_lr", ...}) into the nested per-class form
    ({"StepLR": {...}}). Mutates json_config in place and returns it."""
    # migrate scheduler params
    [task] = json_config["task"].values()
    # Nothing to migrate if there is no scheduler, or the scheduler section
    # is already in the v2 form (no "type" key).
    if (
        "scheduler" not in task
        or task["scheduler"] is None
        or task["scheduler"].get("type") is None
    ):
        return json_config
    op_type = task["scheduler"].get("type")
    # For each legacy type name, copy only the keys present in the v1 config
    # under the corresponding v2 scheduler class name.
    if op_type == "step_lr":
        op_config = {"StepLR": {}}
        for key in ["step_size", "gamma"]:
            if key in task["scheduler"]:
                op_config["StepLR"][key] = task["scheduler"][key]
        task["scheduler"] = op_config
    elif op_type == "lm_fine_tuning":
        op_config = {"LmFineTuning": {}}
        for key in [
            "cut_frac",
            "ratio",
            "non_pretrained_param_groups",
            "lm_lr_multiplier",
            "lm_use_per_layer_lr",
            "lm_gradual_unfreezing",
            "last_epoch",
        ]:
            if key in task["scheduler"]:
                op_config["LmFineTuning"][key] = task["scheduler"][key]
        task["scheduler"] = op_config
    elif op_type == "reduce_lr_on_plateau":
        op_config = {"ReduceLROnPlateau": {}}
        for key in [
            "lower_is_better",
            "factor",
            "patience",
            "min_lr",
            "threshold",
            "threshold_is_absolute",
            "cooldown",
        ]:
            if key in task["scheduler"]:
                op_config["ReduceLROnPlateau"][key] = task["scheduler"][key]
        task["scheduler"] = op_config
    elif op_type == "cosine_annealing_lr":
        op_config = {"CosineAnnealingLR": {}}
        for key in ["t_max", "eta_min"]:
            if key in task["scheduler"]:
                op_config["CosineAnnealingLR"][key] = task["scheduler"][key]
        task["scheduler"] = op_config
    elif op_type == "exponential_lr":
        op_config = {"ExponentialLR": {}}
        for key in ["gamma"]:
            if key in task["scheduler"]:
                op_config["ExponentialLR"][key] = task["scheduler"][key]
        task["scheduler"] = op_config
    elif op_type == "none":
        # An explicit "none" scheduler in v1 means no scheduler section in v2.
        del task["scheduler"]
    else:
        raise ValueError("Migration for scheduler type %s is not supported." % op_type)
    return json_config
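
# A minimal usage sketch of the migration above. The task name
# "DocClassificationTask" and the parameter values are illustrative
# assumptions, not taken from a real PyText config; the sketch shows the
# flat v1 scheduler section being rewritten in place into the nested v2 form.
if __name__ == "__main__":
    config = {
        "task": {
            "DocClassificationTask": {
                "scheduler": {"type": "step_lr", "step_size": 2, "gamma": 0.5}
            }
        }
    }
    migrated = v1_to_v2(config)
    # Prints: {'StepLR': {'step_size': 2, 'gamma': 0.5}}
    print(migrated["task"]["DocClassificationTask"]["scheduler"])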