def main()

in eval_voc_classif.py
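
Fine-tunes and evaluates a pretrained model on PASCAL VOC 2007 multi-label classification: it replaces the model's top layer with a 20-way linear head (one output per VOC class), re-initializes the classifier, optionally freezes the convolutional features (--fc6_8), trains with SGD under a per-element binary cross-entropy loss (BCEWithLogitsLoss), and finally evaluates on the training split and the held-out split.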


def main():
    args = parser.parse_args()
    print(args)

    # fix random seeds
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # create the model, swap its top layer for a 20-way linear head
    # (one output per PASCAL VOC class), and move it to the GPU
    model = load_model(args.model)
    model.top_layer = nn.Linear(model.top_layer.weight.size(1), 20)
    model.cuda()
    cudnn.benchmark = True

    # choose the data partition: fine-tune on 'train' and report on 'val',
    # or fine-tune on 'trainval' and report on 'test'
    if args.split == 'train':
        args.test = 'val'
    elif args.split == 'trainval':
        args.test = 'test'
    else:
        raise ValueError('unsupported split: ' + args.split)
    # data loader
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    dataset = VOC2007_dataset(
        args.vocdir,
        split=args.split,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop(224, scale=(args.min_scale, args.max_scale), ratio=(1, 1)),
            transforms.ToTensor(),
            normalize,
        ]),
    )

    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=16,
        shuffle=False,
        num_workers=24,
        pin_memory=True,
    )
    print('PASCAL VOC 2007 ' + args.split + ' dataset loaded')

    # re-initialize the classifier layers
    for m in model.classifier.modules():
        if isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.01)
            m.bias.data.fill_(0.1)
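    # the freshly created top_layer keeps PyTorch's default weight
    # initialization; only its bias is reset explicitly below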
    model.top_layer.bias.data.fill_(0.1)

    if args.fc6_8:
        # freeze the convolutional features; only the fully connected
        # classifier (fc6-fc8) is trained
        for param in model.features.parameters():
            param.requires_grad = False
        # unfreeze batchnorm scaling
        if args.train_batchnorm:
            for layer in model.modules():
                if isinstance(layer, torch.nn.BatchNorm2d):
                    for param in layer.parameters():
                        param.requires_grad = True
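        # optional sanity check (not part of the original script): with
        # --fc6_8 set, only classifier/top_layer parameters (plus batchnorm
        # scalings when --train_batchnorm is set) should remain trainable:
        # print([n for n, p in model.named_parameters() if p.requires_grad])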

    # set optimizer
    optimizer = torch.optim.SGD(
        filter(lambda x: x.requires_grad, model.parameters()),
        lr=args.lr,
        momentum=0.9,
        weight_decay=args.wd,
    )

    criterion = nn.BCEWithLogitsLoss(reduction='none')
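    # reduction='none' keeps the per-element losses; presumably train() masks
    # out VOC's "difficult" labels before averaging. A hypothetical sketch of
    # that step (difficult_mask is an assumed name, not from this excerpt):
    #   per_elem = criterion(output, target)              # shape (batch, 20)
    #   loss = per_elem.masked_fill(difficult_mask, 0).sum() / n_valid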

    print('Start training')
    it = 0
    losses = AverageMeter()
    while it < args.nit:
        it = train(
            loader,
            model,
            optimizer,
            criterion,
            args.fc6_8,
            losses,
            it=it,
            total_iterations=args.nit,
            stepsize=args.stepsize,
        )
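    # train() presumably runs one pass over the loader and returns the updated
    # iteration count, so the loop exits once args.nit iterations are reached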

    print('Evaluation')
    if args.eval_random_crops:
        transform_eval = [
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop(224, scale=(args.min_scale, args.max_scale), ratio=(1, 1)), 
            transforms.ToTensor(),
            normalize,
        ]
    else:
        transform_eval = [
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))
        ] 
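    # note: TenCrop returns 10 crops per image, so each loader sample has
    # shape (1, 10, C, H, W); evaluate() presumably folds the crops into the
    # batch dimension and averages the 10 predictions, along the lines of the
    # standard torchvision pattern:
    #   bs, ncrops, c, h, w = inp.size()
    #   out = model(inp.view(-1, c, h, w)).view(bs, ncrops, -1).mean(1)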

    print('Train set')
    train_dataset = VOC2007_dataset(args.vocdir, split=args.split, transform=transforms.Compose(transform_eval))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=24, 
        pin_memory=True,
    )
    evaluate(train_loader, model, args.eval_random_crops)

    print('Test set')
    test_dataset = VOC2007_dataset(args.vocdir, split=args.test, transform=transforms.Compose(transform_eval))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=24, 
        pin_memory=True,
    )
    evaluate(test_loader, model, args.eval_random_crops)
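
Example invocation (hypothetical: the flag names are inferred from the args.* attributes used above; check the argparse definitions in eval_voc_classif.py for the real names and defaults):

    python eval_voc_classif.py --vocdir /path/to/VOC2007 --model checkpoint.pth \
        --split train --nit 10000 --lr 0.01 --wd 1e-6 --seed 31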