in timm/optim/lamb.py [0:0]
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
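    # Global gradient clipping is folded into the update: when enabled, every
    # gradient below is divided by the value returned here, scaling the global
    # grad norm down to the configured maximum.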
    clip_grad_norm = self._get_clip_grad_norm()  # None if disabled

    for group in self.param_groups:
        bias_correction = 1 if group['bias_correction'] else 0
        beta1, beta2 = group['betas']
        grad_averaging = 1 if group['grad_averaging'] else 0
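        # With grad averaging the first moment is a true EMA (beta1 * m + (1 - beta1) * g);
        # without it the raw gradient is accumulated at full weight (beta3 = 1.0).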
        beta3 = 1 - beta1 if grad_averaging else 1.0

        # assume same step across group now to simplify things
        # per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel
        if 'step' in group:
            group['step'] += 1
        else:
            group['step'] = 1
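        # Adam-style bias correction for the zero-initialized moment estimates;
        # when disabled, the correction factors are 1.0 (no rescaling).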
        if bias_correction:
            bias_correction1 = 1 - beta1 ** group['step']
            bias_correction2 = 1 - beta2 ** group['step']
        else:
            bias_correction1, bias_correction2 = 1.0, 1.0

        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad

            if clip_grad_norm is not None:
                grad.div_(clip_grad_norm)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(p)
                # Exponential moving average of squared gradient values
                state['exp_avg_sq'] = torch.zeros_like(p)

            exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']

            # Decay the first and second moment running average coefficient
            exp_avg.mul_(beta1).add_(grad, alpha=beta3)  # m_t
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # v_t
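            # Adam-style direction: bias-corrected first moment divided by
            # (sqrt of the bias-corrected second moment + eps).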
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
            update = (exp_avg / bias_correction1).div_(denom)

            if group['caution']:
                # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
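                # Zero out update components whose sign disagrees with the current gradient,
                # then rescale by the surviving fraction so the overall update scale is roughly preserved.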
                mask = (update * grad > 0).to(grad.dtype)
                mask.div_(mask.mean().clamp_(min=1e-3))
                update.mul_(mask)

            weight_decay = group['weight_decay']
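            # Two weight decay modes: decoupled decay is applied directly to the weights
            # (AdamW-style, bypassing the trust ratio), optionally rescaled by lr**2 / base lr
            # when corrected_weight_decay is set; otherwise decay is added into the update
            # and is therefore scaled by the trust ratio below, as in the original LAMB.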
            if weight_decay != 0:
                if group.get('decoupled_decay', False):
                    if group['corrected_weight_decay']:
                        wd_scale = group['lr'] ** 2 / self.defaults['lr']
                    else:
                        wd_scale = group['lr']
                    p.add_(p, alpha=-wd_scale * weight_decay)
                else:
                    update.add_(p, alpha=weight_decay)

            if weight_decay != 0 or group['always_adapt']:
                # Layer-wise LR adaptation. By default, adaptation is skipped for parameters that are
                # excluded from weight decay; if always_adapt == True it is always applied.
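                # Trust ratio = ||w|| / ||update||: each layer's step is scaled to be
                # proportional to the norm of its own weights (LARS/LAMB-style adaptation).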
                w_norm = p.norm(2.0)
                g_norm = update.norm(2.0)
                trust_ratio = w_norm / g_norm
                # FIXME nested where required since logical and/or not working in PT XLA
                # Set the ratio to 1.0 (no change) if either weight norm or grad norm is zero
                trust_ratio = torch.where(
                    w_norm > 0,
                    torch.where(g_norm > 0, trust_ratio, 1.0),
                    1.0,
                )
                if group['trust_clip']:
                    # LAMBC trust clipping, upper bound fixed at one
                    trust_ratio = torch.clamp(trust_ratio, max=1.0)
                update.mul_(trust_ratio)
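            # Final parameter update: p <- p - lr * update, where update already
            # includes the trust ratio and any coupled weight decay.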
            p.add_(update, alpha=-group['lr'])

    return loss
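
# Illustrative usage sketch (assumed constructor arguments; check the Lamb class
# signature in this file before copying):
#
#   from timm.optim import Lamb
#   optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)
#   loss = criterion(model(x), y)
#   loss.backward()
#   optimizer.step()
#   optimizer.zero_grad()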