def step()

in pytorch/sagemakercv/training/optimizers/mlperf_fp16_optimizer.py


    def step(self, closure=None):
        """
        Not supporting closure.
        """
        fp16_grads = []
        fp32_grads = []
        skip = False

        # gather the gradients of the fp16 parameter groups
        for group in self.fp16_params:
            fp16_grad = [p.grad for p in group]
            fp16_grads.append(fp16_grad)

        # gather the gradients of the fp32 parameter groups
        for group in self.fp32_params:
            fp32_grad = [p.grad for p in group]
            fp32_grads.append(fp32_grad)

        # overflow check: the multi-tensor l2norm kernel leaves overflow_buf
        # non-zero when it encounters an inf/NaN gradient
        self.overflow_buf.zero_()
        for fp16_grad in fp16_grads:
            if len(fp16_grad) > 0:
                norm, norm_per_tensor = multi_tensor_applier(self.multi_tensor_l2norm,
                                                             self.overflow_buf,
                                                             [fp16_grad], True)
                if self.overflow_buf.item() != 0:
                    skip = True
        for fp32_grad in fp32_grads:
            if len(fp32_grad) > 0:
                norm, norm_per_tensor = multi_tensor_applier(self.multi_tensor_l2norm,
                                                             self.overflow_buf,
                                                             [fp32_grad], True)
                if self.overflow_buf.item() != 0:
                    skip = True

        # an overflow was found: adjust the loss scale and skip this update
        if skip:
            self._update_scale(skip)
            return
 
        # hand the gradients, the fp32 master copies of the fp16 params, and
        # the current loss scale to the wrapped optimizer
        dict_fp16 = {'params': self.fp16_params, 'master': self.fp32_from_fp16_params, 'grads': fp16_grads}
        dict_fp32 = {'params': self.fp32_params, 'grads': fp32_grads}
          
        self.optimizer.step(dict_fp16=dict_fp16, dict_fp32=dict_fp32, scale=self.cur_scale)

        self._update_scale(False)
        return
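
For context, the method above follows the usual dynamic loss-scaling recipe: the loss is scaled before backward, the update is skipped (and the scale adjusted) whenever a gradient overflows, and otherwise the wrapped optimizer applies the update with the current scale. Below is a minimal, self-contained sketch of that recipe in plain PyTorch; the model, the loss-scaling constants, and the inline torch.isfinite check are illustrative stand-ins for the wrapper's _update_scale logic and the fused multi_tensor_l2norm overflow check, not code from the repository.

    import torch

    # Toy model and loss-scaling constants; illustrative defaults only,
    # not values taken from the repository.
    model = torch.nn.Linear(16, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    cur_scale = 2.0 ** 16      # current loss scale
    scale_factor = 2.0         # backoff/growth factor
    scale_window = 2000        # grow the scale after this many clean steps
    good_steps = 0

    for _ in range(10):
        x = torch.randn(8, 16)
        loss = model(x).pow(2).mean()

        optimizer.zero_grad()
        (loss * cur_scale).backward()   # scale up to keep small grads representable

        # overflow check: any inf/NaN gradient means this step is skipped
        overflow = any(not torch.isfinite(p.grad).all() for p in model.parameters())

        if overflow:
            cur_scale = max(cur_scale / scale_factor, 1.0)   # back off the scale
            good_steps = 0
            continue

        for p in model.parameters():
            p.grad.div_(cur_scale)      # unscale before the real update
        optimizer.step()

        good_steps += 1
        if good_steps % scale_window == 0:
            cur_scale *= scale_factor   # grow the scale after a clean run

In the listing itself, the unscaling is delegated to the wrapped optimizer through the scale=self.cur_scale argument, and _update_scale presumably performs the backoff/growth bookkeeping sketched above.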