in torchbiggraph/row_adagrad.py [0:0]
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
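
    Example (illustrative sketch; assumes ``RowAdagrad(Optimizer)`` defined in
    this module accepts ``(params, lr=...)`` like ``torch.optim.Adagrad``)::

        >>> emb = torch.nn.Embedding(1000, 64, sparse=True)
        >>> opt = RowAdagrad(emb.parameters(), lr=0.1)
        >>> opt.zero_grad()
        >>> loss = emb(torch.tensor([1, 2, 3])).pow(2).sum()
        >>> loss.backward()  # produces a sparse gradient for emb.weight
        >>> opt.step()  # updates one accumulator per touched row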
"""
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        for p in group["params"]:
            if p.grad is None:
                continue
            grad = p.grad.data
            state = self.state[p]
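            # state["sum"] holds one squared-gradient accumulator per row of p
            # (row-wise Adagrad), not one per element as in standard Adagrad.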
            # state['step'] += 1
            if group["weight_decay"] != 0:
                if grad.is_sparse:
                    raise RuntimeError(
                        "weight_decay option is not "
                        "compatible with sparse gradients"
                    )
                grad = grad.add(p.data, alpha=group["weight_decay"])
            # clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
            clr = group["lr"]
            if grad.is_sparse:
                if grad._indices().numel() == 0:
                    continue
                # the update is non-linear so indices must be unique
                grad = grad.coalesce()
                grad_indices = grad._indices()[0]
                grad_values = grad._values()
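                # grad_indices: row indices of the touched rows of p;
                # grad_values: the corresponding [nnz, dim] gradient values.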
                # multiple HOGWILD processes may perform unsynchronized
                # updates to G. Update a local copy of G independently from
                # the shared-memory copy, to guarantee that
                # local_G >= grad^2
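                # Indexing with an index tensor below returns a copy, so
                # local_G is private to this process.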
local_G = state["sum"][grad_indices] # _sparse_mask
delta_G = (grad_values * grad_values).mean(1)
state["sum"].index_add_(0, grad_indices, delta_G)
local_G += delta_G
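                # Adagrad step for the touched rows only: scale each row's
                # gradient by 1 / (sqrt(accumulated mean squared grad) + eps).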
                std_values = local_G.sqrt_().add_(1e-10).unsqueeze(1)
                p.data.index_add_(0, grad_indices, -clr * grad_values / std_values)
            else:
                # multiple HOGWILD processes may perform unsynchronized
                # updates to G. Update a local copy of G independently from
                # the shared-memory copy, to guarantee that
                # local_G >= grad^2
                local_G = state["sum"].clone()
                delta_G = (grad * grad).mean(1)
                state["sum"] += delta_G
                local_G += delta_G
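                # Dense Adagrad step: p -= lr * grad / (sqrt(G_row) + eps),
                # with the per-row denominator broadcast across each row.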
                std = local_G.sqrt().add_(1e-10)
                p.data.addcdiv_(grad, std.unsqueeze(1), value=-clr)
    return loss