def swa_cyc_lr()

in schedulers.py [0:0]

Builds the SWA (stochastic weight averaging) cyclic learning-rate schedule. The returned closure takes (epoch, iteration) and: warms up via _warmup_lr, decays args.lr with a cosine schedule until the first SWA cycle boundary, then runs cyclic cosine restarts with peak args.swa_lr between successive boundaries, writing the computed rate into the optimizer on every call.

import numpy as np


def swa_cyc_lr(optimizer, args, **kwargs):
    # _warmup_lr and assign_learning_rate are helpers defined elsewhere; they are
    # used below to compute the warmup rate and to write the rate into the optimizer.
    def _lr_adjuster(epoch, iteration):
        if epoch < args.warmup_length:
            # Warmup phase: hand the rate computation off to _warmup_lr.
            lr = _warmup_lr(args.lr, args.warmup_length, epoch)
        else:
            b = args.epochs
            # TODO: this only works for 160 epochs as of now
            # (args.swa_start is read as a fraction of a 160-epoch run).
            a = int(args.swa_start * 160)

            # Cycle boundary epochs: the SWA cosine schedule restarts at each
            # entry of ts, and the final boundary is pinned to the last epoch.
            ts = [b]
            if args.num_models >= 2:
                step = (b - a) // (args.num_models - 1)
                ts = list(np.arange(a, b + 1, step))
                if len(ts) > args.num_models:
                    # Integer flooring of step produced too many boundaries;
                    # recompute with an exclusive end to drop the extra one.
                    ts = list(np.arange(a, b, step))
            if ts[-1] != b:
                ts[-1] = b
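            # Hypothetical example: with args.epochs = 160, args.swa_start = 0.75
            # and args.num_models = 4, a = 120, step = 13 and ts becomes
            # [120, 133, 146, 160] after pinning the last entry to b.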

            if epoch < ts[0]:
                # Before the first SWA cycle: cosine decay from args.lr,
                # measured from the end of warmup to the first boundary.
                lrbase = args.lr
                e = epoch - args.warmup_length
                es = ts[0] - args.warmup_length
                lr = 0.5 * (1 + np.cos(np.pi * e / es)) * lrbase
            else:
                # Inside the SWA phase: restart a cosine schedule with peak
                # args.swa_lr at every boundary the epoch has passed; the last
                # match wins. Assumes epoch stays below ts[-1] (== args.epochs),
                # so ts[i + 1] exists for the matched index.
                for i in range(len(ts)):
                    if epoch >= ts[i]:
                        lrbase = args.swa_lr
                        e = epoch - ts[i]
                        es = ts[i + 1] - ts[i]
                        lr = 0.5 * (1 + np.cos(np.pi * e / es)) * lrbase
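                # Hypothetical check, continuing the example above: with
                # ts = [120, 133, 146, 160] and epoch = 126, the matched cycle
                # is [120, 133], so e = 6, es = 13 and
                # lr = 0.5 * (1 + cos(pi * 6 / 13)) * args.swa_lr ≈ 0.56 * args.swa_lr.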

        assign_learning_rate(optimizer, lr)
        print(lr)
        return lr

    return _lr_adjuster
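
The two helpers the adjuster relies on are not shown on this page. A minimal sketch of what they typically look like, assuming a linear warmup and a standard PyTorch optimizer; the real implementations in schedulers.py may differ:

def _warmup_lr(base_lr, warmup_length, epoch):
    # Assumed behavior: ramp linearly from ~0 up to base_lr over warmup_length epochs.
    return base_lr * (epoch + 1) / warmup_length


def assign_learning_rate(optimizer, new_lr):
    # Assumed behavior: write the new rate into every parameter group.
    for param_group in optimizer.param_groups:
        param_group["lr"] = new_lr

A minimal usage sketch of swa_cyc_lr itself. The args namespace and its values are illustrative, not taken from the repository's configs, and the training loop is a placeholder:

from argparse import Namespace

import torch

args = Namespace(
    lr=0.1,            # peak learning rate before the SWA phase
    swa_lr=0.05,       # peak learning rate of each SWA cycle
    epochs=160,        # total epochs (the schedule currently assumes 160, see TODO)
    warmup_length=5,   # warmup epochs
    swa_start=0.75,    # fraction of the 160-epoch run before SWA cycles begin
    num_models=4,      # number of SWA cycle boundaries
)

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
adjust_lr = swa_cyc_lr(optimizer, args)

for epoch in range(args.epochs):
    lr = adjust_lr(epoch, iteration=None)  # sets the optimizer's lr and returns it
    # ... run one epoch of training with `optimizer` here ...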