in optim/rwsadagrad.py [0:0]
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
    loss = None
    if closure is not None:
        loss = closure()

    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue

            if not self.momentum_initialized:
                if p.grad.data.is_sparse:
                    # Row-wise state: one accumulator per embedding row.
                    self.state[p]['momentum'] = torch.full(
                        [p.data.shape[0]],
                        self.defaults["initial_accumulator_value"],
                        dtype=torch.float32,
                    )
                else:
                    # Dense state: one accumulator per element, as in plain Adagrad.
                    self.state[p]['sum'] = torch.full_like(
                        p.data,
                        self.defaults["initial_accumulator_value"],
                        dtype=torch.float32,
                    )

            grad = p.grad
            state = self.state[p]
            state['step'] += 1

            if group['weight_decay'] != 0:
                if p.grad.data.is_sparse:
                    raise RuntimeError(
                        "weight_decay option is not compatible with sparse gradients"
                    )
                # Tensor.add(scalar, tensor) is deprecated; use the alpha keyword.
                grad = grad.add(p.data, alpha=group['weight_decay'])

            # Learning rate with the standard Adagrad decay schedule:
            # lr / (1 + (step - 1) * lr_decay).
            clr = group['lr'] / (1.0 + (state['step'] - 1.0) * group['lr_decay'])

            if grad.is_sparse:
                grad = grad.coalesce()  # the update is non-linear so indices must be unique
                grad_indices = grad._indices()
                grad_values = grad._values()
                size = grad.size()

                def make_sparse(values, row_wise):
                    constructor = grad.new
                    matrix_size = [size[0]] if row_wise else size
                    return constructor(grad_indices, values, matrix_size)
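
                # Shape sketch (illustrative; assumes an embedding-style sparse
                # gradient of size [num_rows, dim] with values of shape [nnz, dim]):
                #   make_sparse(v, row_wise=True)  -> sparse tensor of size [num_rows]
                #   make_sparse(v, row_wise=False) -> sparse tensor of size [num_rows, dim]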
                if grad_values.numel() > 0:
                    # Accumulate one squared-gradient statistic per touched row.
                    momentum_update = make_sparse(grad_values.pow(2).mean(dim=1), True)
                    state['momentum'].add_(momentum_update)  # update momentum
                    std = state['momentum'].sparse_mask(momentum_update.coalesce())
                    std_values = std._values().sqrt_().add_(group['eps'])
                    # Scale every element of a row by that row's accumulated statistic.
                    p.data.add_(
                        make_sparse(grad_values / std_values.view(std_values.size()[0], 1), False),
                        alpha=-clr,
                    )
            else:
                # Dense gradients fall back to the standard Adagrad update.
                state['sum'].addcmul_(grad, grad, value=1.0)
                std = state['sum'].sqrt().add_(group['eps'])
                p.data.addcdiv_(grad, std, value=-clr)

    self.momentum_initialized = True
    return loss
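
A minimal usage sketch, assuming the class is importable as RWSAdagrad from this module and that its constructor follows the usual Adagrad signature (lr, lr_decay, weight_decay, initial_accumulator_value, eps) and initializes state['step']; the embedding sizes are made up for illustration. Setting sparse=True on the embedding produces sparse gradients, which take the row-wise branch of step() above:

import torch
import torch.nn as nn

from optim.rwsadagrad import RWSAdagrad  # assumed import path

emb = nn.EmbeddingBag(1000, 64, mode='sum', sparse=True)  # sparse gradients
opt = RWSAdagrad(emb.parameters(), lr=0.01)  # assumed constructor signature

ids = torch.randint(0, 1000, (8, 4))  # 8 bags of 4 ids each
loss = emb(ids).sum()

opt.zero_grad()
loss.backward()  # emb.weight.grad is a sparse tensor
opt.step()       # row-wise branch: one accumulator value per embedding row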