# experiment.py
import torch


def init_optimizer(model, optimizer_state,
                   PARAM_GROUPS=(),
                   freeze_bn=False,
                   breed='sgd',
                   weight_decay=0.0005,
                   lr_policy='multistep',
                   lr=0.001,
                   gamma=0.1,
                   momentum=0.9,
                   betas=(0.9, 0.999),
                   milestones=(30, 37),  # tuple instead of a mutable default list
                   max_epochs=43,
                   ):
    # init the optimizer
    # NOTE: PARAM_GROUPS and freeze_bn are accepted but not used in this function.
    if hasattr(model, '_get_param_groups') and getattr(model, 'custom_param_groups', False):
        # the model defines its own parameter groups
        # (e.g. per-group learning rates / weight decay)
        p_groups = model._get_param_groups(lr, wd=weight_decay)
    else:
        # default: a single group containing all trainable parameters
        allprm = [prm for prm in model.parameters() if prm.requires_grad]
        p_groups = [{'params': allprm, 'lr': lr}]

    if breed == 'sgd':
        optimizer = torch.optim.SGD(p_groups, lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay)
    elif breed == 'adagrad':
        optimizer = torch.optim.Adagrad(p_groups, lr=lr,
                                        weight_decay=weight_decay)
    elif breed == 'adam':
        optimizer = torch.optim.Adam(p_groups, lr=lr,
                                     betas=betas,
                                     weight_decay=weight_decay)
    else:
        raise ValueError("no such solver type %s" % breed)
    print(" -> solver type = %s" % breed)

    if lr_policy == 'multistep':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=milestones, gamma=gamma)
    else:
        raise ValueError("no such lr policy %s" % lr_policy)

    # attach the epoch budget to the scheduler so the training loop can read it
    scheduler.max_epochs = max_epochs

    if optimizer_state is not None:
        print(" -> setting loaded optimizer state")
        optimizer.load_state_dict(optimizer_state)

    optimizer.zero_grad()
    return optimizer, scheduler
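

# --- Example usage: a minimal sketch, not part of the original experiment code.
# The toy model, the dummy loss, and the hyperparameter choices below are
# assumptions made purely to illustrate how init_optimizer is meant to be called.
if __name__ == '__main__':
    toy_model = torch.nn.Linear(10, 2)  # stand-in for a real model

    # fresh run: nothing to restore, so optimizer_state is None
    optimizer, scheduler = init_optimizer(
        toy_model, optimizer_state=None,
        breed='adam', lr=1e-3, milestones=(3, 5), max_epochs=6)

    for epoch in range(scheduler.max_epochs):
        # one dummy "batch" per epoch, just to exercise the optimizer
        loss = toy_model(torch.randn(4, 10)).pow(2).mean()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()  # decays the lr by `gamma` at each milestone epoch

    # resuming from a checkpoint would look like (paths/keys are assumptions):
    # state = torch.load('checkpoint.pth')
    # optimizer, scheduler = init_optimizer(
    #     toy_model, optimizer_state=state['optimizer'], breed='adam')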