scripts/train_detection.py [284:331]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    ("data_time", data_time_meter),
                    ("batch_time", batch_time_meter)
                ])
            )

        data_time = time.time()

    return global_step


def validate(model, dataloader, loss_weights, **varargs):
    model.eval()
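    # Inform the distributed batch sampler of the current epoch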
    dataloader.batch_sampler.set_epoch(varargs["epoch"])

    num_stuff = dataloader.dataset.num_stuff

    loss_meter = AverageMeter(())
    data_time_meter = AverageMeter(())
    batch_time_meter = AverageMeter(())

    # Accumulators for AP and panoptic computation
    coco_struct = []
    img_list = []

    data_time = time.time()
    for it, batch in enumerate(dataloader):
        with torch.no_grad():
            idxs = batch["idx"]
            batch_sizes = [img.shape[-2:] for img in batch["img"]]
            original_sizes = batch["size"]

            # Upload the network inputs to the GPU (non-blocking copy)
            batch = {k: batch[k].cuda(device=varargs["device"], non_blocking=True) for k in NETWORK_INPUTS}
            data_time_meter.update(torch.tensor(time.time() - data_time))

            batch_time = time.time()

            # Run network
            losses, pred = model(**batch, do_loss=True, do_prediction=True)
            losses = OrderedDict((k, v.mean()) for k, v in losses.items())
            losses = all_reduce_losses(losses)
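            # Total objective: weighted sum of the individual loss terms.
            # Iteration order of losses must line up with loss_weights.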
            loss = sum(w * l for w, l in zip(loss_weights, losses.values()))

            # Update meters
            loss_meter.update(loss.cpu())
            batch_time_meter.update(torch.tensor(time.time() - batch_time))

            del loss, losses
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
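
all_reduce_losses is defined elsewhere in the repository and is not shown in this window. A minimal sketch of what such a helper could look like, assuming torch.distributed is initialized and each loss is a 0-dim CUDA tensor; the average-by-world-size semantics are an assumption, only the call site above is given:

import torch.distributed as dist
from collections import OrderedDict

def all_reduce_losses(losses):
    # Sum each scalar loss across all workers, then divide by the world
    # size so every process logs the same averaged value.
    world_size = dist.get_world_size()
    reduced = OrderedDict()
    for name, value in losses.items():
        value = value.clone()  # all_reduce operates in place
        dist.all_reduce(value, op=dist.ReduceOp.SUM)
        reduced[name] = value / world_size
    return reduced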



scripts/train_instance_seg.py [291:338]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    ("data_time", data_time_meter),
                    ("batch_time", batch_time_meter)
                ])
            )

        data_time = time.time()

    return global_step


def validate(model, dataloader, loss_weights, **varargs):
    model.eval()
    dataloader.batch_sampler.set_epoch(varargs["epoch"])

    num_stuff = dataloader.dataset.num_stuff

    loss_meter = AverageMeter(())
    data_time_meter = AverageMeter(())
    batch_time_meter = AverageMeter(())

    # Accumulators for AP and panoptic computation
    coco_struct = []
    img_list = []

    data_time = time.time()
    for it, batch in enumerate(dataloader):
        with torch.no_grad():
            idxs = batch["idx"]
            batch_sizes = [img.shape[-2:] for img in batch["img"]]
            original_sizes = batch["size"]

            # Upload the network inputs to the GPU (non-blocking copy)
            batch = {k: batch[k].cuda(device=varargs["device"], non_blocking=True) for k in NETWORK_INPUTS}
            data_time_meter.update(torch.tensor(time.time() - data_time))

            batch_time = time.time()

            # Run network
            losses, pred = model(**batch, do_loss=True, do_prediction=True)
            losses = OrderedDict((k, v.mean()) for k, v in losses.items())
            losses = all_reduce_losses(losses)
            loss = sum(w * l for w, l in zip(loss_weights, losses.values()))

            # Update meters
            loss_meter.update(loss.cpu())
            batch_time_meter.update(torch.tensor(time.time() - batch_time))

            del loss, losses
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
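
AverageMeter is likewise defined elsewhere in the repository; a minimal sketch consistent with how it is constructed and updated above (the shape argument and the mean property are assumptions inferred from the AverageMeter(()) call sites):

import torch

class AverageMeter:
    # Running average of tensor-valued measurements; the constructor
    # takes the shape of the tracked value, and the validation meters
    # above all track scalars, hence AverageMeter(()).
    def __init__(self, shape=()):
        self.sum = torch.zeros(shape)
        self.count = 0

    def update(self, value):
        # Accumulate on the CPU so the meter never pins GPU memory
        self.sum += value.detach().cpu().to(self.sum.dtype)
        self.count += 1

    @property
    def mean(self):
        return self.sum / max(self.count, 1)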



