def get_lr()

in utils/lr_scheduler.py
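
`get_lr` combines a multiplicative warmup with cosine annealing. For the first `warm_d` epochs the learning rate ramps linearly from `base_lr / warm_m` up to `base_lr`; afterwards it follows a cosine curve that decays toward `cos_eta_min` over `cos_duration` epochs. When `warm_d == 0` the method defers entirely to the parent class. Like PyTorch's built-in schedulers, it computes each step as a ratio applied to the optimizer's current `group['lr']` rather than from a closed form.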


# Module-level imports used by this method:
import math
import warnings

    def get_lr(self):
        if self.warm_d == 0:
            # No warmup configured: defer to the parent scheduler.
            return super().get_lr()
        else:
            if not self._get_lr_called_within_step:
                warnings.warn("To get the last learning rate computed by the scheduler, "
                              "please use `get_last_lr()`.", UserWarning)
            if self.last_epoch == 0:
                # Warmup starts at base_lr / warm_m.
                return [lr / self.warm_m for lr in self.base_lrs]
            elif self.last_epoch <= self.warm_d:
                # Linear warmup, written as the ratio of two consecutive steps of
                # lr(e) = base_lr * (warm_d + (warm_m - 1) * e) / (warm_m * warm_d),
                # applied to the group's current lr; reaches base_lr at e == warm_d.
                return [(self.warm_d + (self.warm_m - 1) * self.last_epoch) /
                        (self.warm_d + (self.warm_m - 1) * (self.last_epoch - 1)) *
                        group['lr'] for group in self.optimizer.param_groups]
            else:
                # Cosine annealing, with the epoch counter shifted past the warmup.
                cos_last_epoch = self.last_epoch - self.warm_d
                if cos_last_epoch == 0:
                    # Defensive: unreachable here, since last_epoch > warm_d
                    # implies cos_last_epoch >= 1.
                    return list(self.base_lrs)
                elif (cos_last_epoch - 1 - self.cos_duration) % (2 * self.cos_duration) == 0:
                    # Restart step of the cosine cycle (mirrors
                    # torch.optim.lr_scheduler.CosineAnnealingLR.get_lr).
                    return [group['lr'] + (base_lr - self.cos_eta_min) *
                            (1 - math.cos(math.pi / self.cos_duration)) / 2
                            for base_lr, group in
                            zip(self.base_lrs, self.optimizer.param_groups)]
                # Recursive cosine update: the ratio of consecutive cosine factors
                # moves the group's current lr toward cos_eta_min.
                return [(1 + math.cos(math.pi * cos_last_epoch / self.cos_duration)) /
                        (1 + math.cos(math.pi * (cos_last_epoch - 1) / self.cos_duration)) *
                        (group['lr'] - self.cos_eta_min) + self.cos_eta_min
                        for group in self.optimizer.param_groups]
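
A minimal usage sketch, assuming a constructor of the form `WarmUpCosineAnnealingLR(optimizer, warm_d, warm_m, cos_duration, cos_eta_min)`; only the attribute names are confirmed by the method above, so the actual `__init__` signature in `utils/lr_scheduler.py` may differ:

    import torch

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    # Hypothetical keyword arguments; see the caveat above.
    scheduler = WarmUpCosineAnnealingLR(optimizer,
                                        warm_d=5,          # warmup length in epochs
                                        warm_m=10,         # start at base_lr / 10
                                        cos_duration=50,   # cosine half-period
                                        cos_eta_min=1e-5)  # decay floor

    for epoch in range(55):
        optimizer.step()      # one epoch of training elided
        scheduler.step()
        # Read the current rate via get_last_lr(), not get_lr().
        print(epoch, scheduler.get_last_lr())

With these numbers the rate starts at 0.01, reaches 0.1 at epoch 5, and then anneals toward 1e-5 over the next 50 epochs.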