higher/optim.py [544:556]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    r"""A differentiable version of the Adamax optimizer.

    This optimizer creates a gradient tape as it updates parameters."""

    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            for p_idx, (p, g) in enumerate(zip(group['params'], grads)):
                # Parameters that received no gradient this step are skipped.
                if g is None:
                    continue

                if g.is_sparse:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
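The excerpt above (apparently the body of higher's DifferentiableAdamax class) cuts off at the sparse-gradient check, so the actual update math is not shown. As a rough sketch of what a differentiable Adamax step looks like, here is the update rule of torch.optim.Adamax rewritten with purely out-of-place tensor ops so the new parameters stay on the autograd tape; the function adamax_step and its signature are illustrative, not higher's API.

import torch

def adamax_step(p, g, exp_avg, exp_inf, step, lr=2e-3,
                betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0):
    """One Adamax step; returns (new_p, new_exp_avg, new_exp_inf).

    Every op is out-of-place so autograd can differentiate through it.
    """
    if weight_decay != 0:
        g = g + weight_decay * p
    beta1, beta2 = betas
    # Biased first-moment estimate (exponential moving average of g).
    exp_avg = beta1 * exp_avg + (1 - beta1) * g
    # Exponentially weighted infinity norm: elementwise max of the decayed
    # previous norm and |g| (eps keeps the denominator nonzero).
    exp_inf = torch.maximum(beta2 * exp_inf, g.abs() + eps)
    clr = lr / (1 - beta1 ** step)  # bias-corrected step size
    return p - clr * exp_avg / exp_inf, exp_avg, exp_inf

# Because nothing is mutated in place, a loss computed on the updated
# parameters can be backpropagated through the update itself:
w = torch.randn(3, requires_grad=True)
g, = torch.autograd.grad((w ** 2).sum(), w, create_graph=True)
new_w, m, u = adamax_step(w, g, torch.zeros(3), torch.zeros(3), step=1)
new_w.sum().backward()  # gradients flow back to w through the step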



higher/optim.py [597:609]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    r"""A differentiable version of the ASGD optimizer.

    This optimizer creates a gradient tape as it updates parameters."""

    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            for p_idx, (p, g) in enumerate(zip(group['params'], grads)):
                if g is None:
                    continue

                if g.is_sparse:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
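This excerpt ends at the same sparse-gradient check; the loop skeleton is identical to the Adamax one above. In the same hedged spirit, here is a sketch of a differentiable ASGD (averaged SGD) step: the eta/mu schedule follows torch.optim.ASGD, every op is out-of-place, and asgd_step together with its state arguments (ax, eta, mu) are illustrative names rather than higher's exact internals.

import torch

def asgd_step(p, g, ax, eta, mu, step, lr=1e-2, lambd=1e-4,
              alpha=0.75, t0=1e6, weight_decay=0.0):
    """One ASGD step; returns (new_p, new_ax, new_eta, new_mu)."""
    if weight_decay != 0:
        g = g + weight_decay * p
    # Decay the parameter, then take the plain gradient step.
    new_p = (1 - lambd * eta) * p - eta * g
    # Polyak averaging: ax is a running average of the iterates,
    # which is what ASGD ultimately reports at evaluation time.
    new_ax = new_p if mu == 1 else ax + mu * (new_p - ax)
    # Anneal the step size and the averaging coefficient.
    new_eta = lr / (1 + lambd * lr * step) ** alpha
    new_mu = 1 / max(1, step - t0)
    return new_p, new_ax, new_eta, new_mu

Usage mirrors the Adamax sketch: compute g with create_graph=True, call asgd_step, and backpropagate a meta-loss through new_p.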



