# tensorflow_addons/optimizers/adabelief.py
def _resource_apply_dense(self, grad, var):
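    # Work in the variable's dtype; fetch the slot variables and cast the
    # hyperparameters and bias-correction powers for this step.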
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)
    wd_t = self._decayed_wd(var_dtype)
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    beta_1_t = self._get_hyper("beta_1", var_dtype)
    beta_2_t = self._get_hyper("beta_2", var_dtype)
    epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
    local_step = tf.cast(self.iterations + 1, var_dtype)
    beta_1_power = tf.pow(beta_1_t, local_step)
    beta_2_power = tf.pow(beta_2_t, local_step)
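
    # Optional schedule: linear warmup over the first warmup_proportion of
    # total_steps, then linear decay toward min_lr for the remaining steps.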
    if self._initial_total_steps > 0:
        total_steps = self._get_hyper("total_steps", var_dtype)
        warmup_steps = total_steps * self._get_hyper("warmup_proportion", var_dtype)
        min_lr = self._get_hyper("min_lr", var_dtype)
        decay_steps = tf.maximum(total_steps - warmup_steps, 1)
        decay_rate = (min_lr - lr_t) / decay_steps
        lr_t = tf.where(
            local_step <= warmup_steps,
            lr_t * (local_step / warmup_steps),
            lr_t + decay_rate * tf.minimum(local_step - warmup_steps, decay_steps),
        )
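
    # Length of the approximated simple moving average (SMA), as in
    # Rectified Adam: sma_inf is its limit, sma_t its value at this step.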
    sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0
    sma_t = sma_inf - 2.0 * local_step * beta_2_power / (1.0 - beta_2_power)
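
    # First moment: exponential moving average of the gradient, with the
    # usual Adam-style bias correction.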
    m_t = m.assign(
        beta_1_t * m + (1.0 - beta_1_t) * grad, use_locking=self._use_locking
    )
    m_corr_t = m_t / (1.0 - beta_1_power)
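
    # Second moment tracks the squared deviation of the gradient from its
    # EMA (the "belief"), rather than the raw squared gradient as in Adam;
    # note that epsilon_t is added inside the EMA update itself.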
    v_t = v.assign(
        beta_2_t * v + (1.0 - beta_2_t) * tf.math.square(grad - m_t) + epsilon_t,
        use_locking=self._use_locking,
    )
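
    # AMSGrad variant: keep the running elementwise maximum of v_t so the
    # denominator is non-decreasing across steps.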
    if self.amsgrad:
        vhat = self.get_slot(var, "vhat")
        vhat_t = vhat.assign(tf.maximum(vhat, v_t), use_locking=self._use_locking)
        v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power))
    else:
        vhat_t = None
        v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power))
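
    # Variance rectification from Rectified Adam: scale the adaptive update
    # by r_t, and fall back to the plain momentum update while the SMA
    # length is still below sma_threshold.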
    if self.rectify:
        r_t_numerator = (sma_t - 4.0) * (sma_t - 2.0) * sma_inf
        r_t_denominator = (sma_inf - 4.0) * (sma_inf - 2.0) * sma_t
        r_t = tf.sqrt(r_t_numerator / r_t_denominator)
        sma_threshold = self._get_hyper("sma_threshold", var_dtype)
        var_t = tf.where(
            sma_t >= sma_threshold,
            r_t * m_corr_t / (v_corr_t + epsilon_t),
            m_corr_t,
        )
    else:
        var_t = m_corr_t / (v_corr_t + epsilon_t)
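
    # Decoupled weight decay (AdamW-style): added to the update itself
    # rather than folded into the gradient.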
    if self._has_weight_decay:
        var_t += wd_t * var
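
    # Apply the scaled update and group all state assignments into one op.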
    var_update = var.assign_sub(lr_t * var_t, use_locking=self._use_locking)
    updates = [var_update, m_t, v_t]
    if self.amsgrad:
        updates.append(vhat_t)
    return tf.group(*updates)
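
# A minimal usage sketch of the optimizer this method belongs to. Assumptions:
# the class is exported as tfa.optimizers.AdaBelief, and its constructor
# exposes the hyperparameters referenced above (total_steps,
# warmup_proportion, min_lr, rectify); adjust to the actual signature.
#
#     import tensorflow as tf
#     import tensorflow_addons as tfa
#
#     model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
#     optimizer = tfa.optimizers.AdaBelief(   # hypothetical export name
#         learning_rate=1e-3,
#         total_steps=10_000,       # > 0 enables warmup followed by decay
#         warmup_proportion=0.1,    # first 10% of steps warm up the LR
#         min_lr=1e-5,              # floor reached at the end of the decay
#         rectify=True,             # RAdam-style variance rectification
#     )
#     model.compile(optimizer=optimizer, loss="mse")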