in main.py
import torch
from skorch.callbacks import LRScheduler  # skorch callback: wraps a torch lr policy

# `get_exponential_decay_gamma` and `SetLR` are project-local helpers,
# presumably defined or imported elsewhere in this repo.


def get_lr_schedulers(args, datasets, is_trnsf):
    """Return the list of `LRScheduler` callbacks selected by `args.train.scheduling_mode`."""
    if args.train.scheduling_mode == "decay":
        # Exponential decay: gamma is derived so the lr shrinks by
        # `scheduling_factor` over the course of `max_epochs`.
        gamma = get_exponential_decay_gamma(
            args.train.scheduling_factor, args.train.trnsf_kwargs.max_epochs
        )
        lr_schedulers = [
            LRScheduler(torch.optim.lr_scheduler.ExponentialLR, gamma=gamma)
        ]
    elif args.train.scheduling_mode == "plateau":
        # Multiply the lr by 0.2 whenever the validation loss plateaus.
        lr_schedulers = [
            LRScheduler(
                torch.optim.lr_scheduler.ReduceLROnPlateau,
                monitor="valid_loss",
                factor=0.2,
            )
        ]
    elif args.train.scheduling_mode == "biplateau":
        lr_schedulers = [
            # Decrease the lr 5x when the validation loss plateaus.
            LRScheduler(
                torch.optim.lr_scheduler.ReduceLROnPlateau,
                monitor="valid_loss",
                factor=0.2,
                patience=3,
                verbose=True,
                threshold=0.01,
                min_lr=1e-5,
            ),
            # Otherwise increase the lr by 1.3x every epoch, capped at 0.5.
            LRScheduler(SetLR, lr_lambda=lambda _, lr, __: min(lr * 1.3, 0.5)),
            # Dirty way to avoid a net lr increase when the loss did not
            # improve: the 1/1.3 factor cancels the 1.3x step above.
            LRScheduler(
                torch.optim.lr_scheduler.ReduceLROnPlateau,
                monitor="valid_loss",
                factor=1 / 1.3,
                patience=1,
            ),
        ]
    elif args.train.scheduling_mode is None:
        lr_schedulers = []
    else:
        raise ValueError(f"Unknown scheduling_mode={args.train.scheduling_mode}")
    return lr_schedulers
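
For reference, a minimal sketch of what `get_exponential_decay_gamma` presumably computes, assuming `scheduling_factor` is the total factor by which the lr should shrink over training. The real helper lives elsewhere in the repo; this is an inferred reimplementation, not the project's code.

def get_exponential_decay_gamma(scheduling_factor, max_epochs):
    # Pick gamma so that gamma ** max_epochs == 1 / scheduling_factor,
    # i.e. the lr ends up divided by `scheduling_factor` after `max_epochs`
    # multiplicative steps of ExponentialLR.
    return (1 / scheduling_factor) ** (1 / max_epochs)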
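
`SetLR` is also project-local. Judging only from the call site above, its `lr_lambda` receives the epoch, the current lr, and the param-group index. A hypothetical reimplementation consistent with that call, not the repo's actual class:

from torch.optim.lr_scheduler import _LRScheduler

class SetLR(_LRScheduler):
    """Set each param group's lr to lr_lambda(epoch, current_lr, group_idx)."""

    def __init__(self, optimizer, lr_lambda, last_epoch=-1):
        # Must be assigned before super().__init__, which performs an initial step.
        self.lr_lambda = lr_lambda
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        return [
            self.lr_lambda(self.last_epoch, group["lr"], i)
            for i, group in enumerate(self.optimizer.param_groups)
        ]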
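
Downstream, the returned list plugs directly into a skorch net's `callbacks`. A minimal usage sketch; the args namespace merely mirrors the attribute accesses above, and `TinyClassifier`, the values, and the data are all hypothetical:

from types import SimpleNamespace

import torch
from skorch import NeuralNetClassifier


class TinyClassifier(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(20, 2)

    def forward(self, x):
        return self.fc(x)  # raw logits, paired with CrossEntropyLoss below


args = SimpleNamespace(
    train=SimpleNamespace(
        scheduling_mode="plateau",
        scheduling_factor=100,
        trnsf_kwargs=SimpleNamespace(max_epochs=50),
    )
)

net = NeuralNetClassifier(
    TinyClassifier,
    criterion=torch.nn.CrossEntropyLoss,
    max_epochs=50,
    callbacks=get_lr_schedulers(args, datasets=None, is_trnsf=False),
)
net.fit(torch.randn(256, 20), torch.randint(0, 2, (256,)))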