diagnostics.py [106:122]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        for interval in range(recalibration_interval):
            logging.info("Interval: {}, recal_i: {}".format(interval, optimizer.recalibration_i))

            optimizer.full_grad_init()

            # Do a full gradient calculation:
            for inner_batch_idx, (data, target) in enumerate(train_loader):
                if args.cuda:
                    data, target = data.cuda(), target.cuda()
                # Variable() wrapping is the pre-0.4 PyTorch idiom; on modern
                # versions, tensors carry autograd state directly.
                data, target = Variable(data), Variable(target)

                # The closure re-evaluates the mini-batch loss and repopulates
                # .grad, so the optimizer can recompute gradients on demand.
                def eval_closure():
                    optimizer.zero_grad()
                    output = model(data)
                    loss = criterion(output, target)
                    loss.backward()
                    return loss
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
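
Both excerpts stop immediately after eval_closure is defined, before it is
consumed. Given the full_grad_init() / recalibration_i interface, this reads
as an SVRG-style recalibration pass that accumulates a full-batch gradient
snapshot. Below is a minimal sketch of an optimizer that could consume such a
closure; the SVRGSketch class, its full_grad_calc method, and the full_grad
state key are illustrative assumptions, not this file's actual API.

SVRGSketch (illustrative sketch, not part of the repo):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import torch


class SVRGSketch(torch.optim.Optimizer):
    # Illustrative SVRG-style optimizer exposing the interface the excerpts
    # rely on (full_grad_init, recalibration_i), plus an *assumed*
    # full_grad_calc(closure) hook for building the gradient snapshot.

    def __init__(self, params, lr=0.01):
        super(SVRGSketch, self).__init__(params, dict(lr=lr))
        self.recalibration_i = 0  # completed recalibration passes
        self.batches_seen = 0     # mini-batches folded into the snapshot

    def full_grad_init(self):
        # Reset the running full-batch gradient before a recalibration pass.
        self.batches_seen = 0
        for group in self.param_groups:
            for p in group['params']:
                self.state[p]['full_grad'] = torch.zeros_like(p.data)

    def full_grad_calc(self, closure):
        # closure() zeroes grads, runs forward/backward, and returns the
        # loss; fold this mini-batch's gradient into the running average:
        # avg_n = avg_{n-1} + (g_n - avg_{n-1}) / n.
        loss = closure()
        self.batches_seen += 1
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                g = self.state[p]['full_grad']
                g.add_(p.grad.data.sub(g).div_(self.batches_seen))
        return loss
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Under these assumptions, the accumulation call inside the inner batch loop
would be optimizer.full_grad_calc(eval_closure). The closure itself follows
the standard PyTorch contract that built-in closure-driven optimizers such as
torch.optim.LBFGS use via optimizer.step(closure).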



diagnostics.py [185:201]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                for interval in range(recalibration_interval):
                    logging.info("Interval: {}, recal_i: {}".format(interval, optimizer.recalibration_i))

                    optimizer.full_grad_init()

                    # Do a full gradient calculation:
                    for inner_batch_idx, (data, target) in enumerate(train_loader):
                        if args.cuda:
                            data, target = data.cuda(), target.cuda()
                        data, target = Variable(data), Variable(target)

                        def eval_closure():
                            optimizer.zero_grad()
                            output = model(data)
                            loss = criterion(output, target)
                            loss.backward()
                            return loss
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
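
The two regions, diagnostics.py [106:122] and [185:201], are identical apart
from indentation, so the full-gradient pass is a natural candidate for a
shared helper. A hedged sketch follows; the helper's name, its argument list,
and the final full_grad_calc call are assumptions (the excerpts end before
showing how eval_closure is actually consumed).

full_gradient_pass (hypothetical refactor, not in the repo):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
from torch.autograd import Variable


def full_gradient_pass(optimizer, model, criterion, train_loader, use_cuda):
    # One full pass over the training set: hand the optimizer a fresh
    # closure per mini-batch so it can build its gradient snapshot.
    optimizer.full_grad_init()
    for data, target in train_loader:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)

        def eval_closure():
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            return loss

        # Assumed accumulation hook; see the SVRGSketch above.
        optimizer.full_grad_calc(eval_closure)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Both call sites would then reduce to
full_gradient_pass(optimizer, model, criterion, train_loader, args.cuda)
inside their respective interval loops.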



