in improved_diffusion/train_util.py [0:0]
def optimize_fp16(self):
    # If any fp16 gradient overflowed to NaN/Inf, skip this step and shrink the loss scale.
    if any(not th.isfinite(p.grad).all() for p in self.model_params):
        self.lg_loss_scale -= 1
        logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
        return

    # Copy fp16 model gradients into the fp32 master gradients.
    model_grads_to_master_grads(self.model_params, self.master_params)
    # Undo the loss scaling applied before backward(); the master params are kept
    # as a single flattened tensor, so only index 0 carries the gradient.
    self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
    self._log_grad_norm()
    self._anneal_lr()
    self.opt.step()
    # Update each EMA copy of the weights at its configured rate.
    for rate, params in zip(self.ema_rate, self.ema_params):
        update_ema(params, self.master_params, rate=rate)
    # Copy the updated fp32 master weights back into the fp16 model parameters.
    master_params_to_model_params(self.model_params, self.master_params)
    # Dynamic loss scaling: slowly grow the scale again after a successful step.
    self.lg_loss_scale += self.fp16_scale_growth
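
The `mul_(1.0 / (2 ** self.lg_loss_scale))` call only makes sense because the loss was multiplied by the same factor before `backward()`. A minimal sketch of that counterpart step, assuming the scaling happens in the training loop's forward/backward pass (illustrative; attribute names mirror this file but the snippet is not quoted from it):

    if self.use_fp16:
        # Scale the loss up so small fp16 gradients do not underflow to zero;
        # optimize_fp16 later divides the master gradient by the same factor.
        loss_scale = 2 ** self.lg_loss_scale
        (loss * loss_scale).backward()
    else:
        loss.backward()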