def in_run_diagnostics()

in diagnostics.py


import logging
import math

from torch.autograd import Variable


def in_run_diagnostics(epoch, batch_idx, args, train_loader, optimizer, model, criterion):
    """Periodically run a full diagnostic logging pass during training."""
    # Only run diagnostics every 10th epoch, unless forced on every epoch.
    if (epoch % 10) == 0 or args.log_diagnostics_every_epoch:
        nbatches = len(train_loader)
        
        if args.log_diagnostics_deciles:
            # Log at every decile of the epoch.
            log_intervals = math.ceil(nbatches/10.0)
            log_now = batch_idx % log_intervals == 0
        else:
            # Log at a few fixed points within the epoch (roughly 2%, 11% and
            # 33% of the way through) and at the final batch.
            log_now = batch_idx == int(math.ceil(nbatches/50.0))
            log_now = log_now or batch_idx == int(math.ceil(nbatches/9.0))
            log_now = log_now or batch_idx == int(math.ceil(nbatches/3.0))
            log_now = log_now or batch_idx == nbatches-1

        if log_now:
            print("Running diagnostics at batch_idx = {}".format(batch_idx))
            optimizer.logging_pass_start()

            if optimizer.epoch >= optimizer.vr_from_epoch:
                # Sweep the full training set so the optimizer can record
                # per-batch statistics for this logging pass.
                for inner_batch_idx, (data, target) in enumerate(train_loader):
                    if args.cuda:
                        data, target = data.cuda(), target.cuda()
                    data, target = Variable(data), Variable(target)

                    def eval_closure():
                        # Recompute loss and gradients for this batch without
                        # taking an optimization step.
                        optimizer.zero_grad()
                        output = model(data)
                        loss = criterion(output, target)
                        loss.backward()
                        return loss

                    optimizer.logging_pass(inner_batch_idx, closure=eval_closure)
            logging.info("Logging pass finished")

            optimizer.logging_pass_end(batch_idx)
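
For context, here is a minimal sketch of how this helper might be driven from a training loop. The loop itself is assumed and not part of diagnostics.py; only the argument names and the custom optimizer hooks (logging_pass_start, logging_pass, logging_pass_end, epoch, vr_from_epoch) come from the function above, and args.epochs plus the line setting optimizer.epoch are guesses at how the surrounding code is wired.

# Hypothetical training loop (assumed, for illustration only).
for epoch in range(args.epochs):
    optimizer.epoch = epoch  # assumption: the optimizer tracks the current epoch
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)

        def closure():
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            return loss

        optimizer.step(closure)

        # Possibly trigger the in-epoch diagnostic logging pass.
        in_run_diagnostics(epoch, batch_idx, args, train_loader,
                           optimizer, model, criterion)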