def main()

in visu/gradient_ascent.py
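
Starting from random noise, main() performs gradient ascent on the input
image to visualize what each channel of a chosen conv layer responds to: it
maximizes the channel's spatially averaged activation, regularized by L2
weight decay on the image and a periodic Gaussian blur, and writes one JPEG
per channel.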


# imports this excerpt relies on; `parser`, `CONV`, `load_model`, `forward`
# and `deprocess_image` are assumed to be defined at module level elsewhere
# in visu/gradient_ascent.py
import os
import sys

import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
from scipy.ndimage import gaussian_filter


def main():
    args = parser.parse_args()

    # sanity check: args.conv is 1-indexed; AlexNet has 5 conv layers and
    # VGG-16 has 13
    if args.arch == 'alexnet':
        assert args.conv < 6
    elif args.arch == 'vgg16':
        assert args.conv < 14

    # create the output directory for this conv layer
    repo = os.path.join(args.exp, 'conv' + str(args.conv))
    os.makedirs(repo, exist_ok=True)

    # build the model and freeze its weights: only the input image is optimized
    model = load_model(args.model)
    model.cuda()
    for params in model.parameters():
        params.requires_grad = False
    model.eval()
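
    # gradient_ascent(f) synthesizes an image that maximizes the mean
    # activation of channel f in the target conv layer and saves it as a JPEG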

    def gradient_ascent(f):
        print(f, end=' ')
        sys.stdout.flush()
        fname_out = '{0}/layer{1}-channel{2}.jpeg'.format(repo, args.conv, f)
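
        # start from Gaussian noise around mid-gray (mean 128, std 20, HWC);
        # ToTensor() reorders a float32 array to CHW without rescaling it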

        img_noise = np.random.normal(size=(args.idim, args.idim, 3)) * 20 + 128
        img_noise = img_noise.astype('float32')
        inp = transforms.ToTensor()(img_noise)
        inp = torch.unsqueeze(inp, 0)

        for it in range(args.niter):
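            # legacy (pre-0.4) PyTorch idiom: wrap the input in a Variable
            # with requires_grad=True to get gradients w.r.t. the image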
            x = torch.autograd.Variable(inp.cuda(), requires_grad=True)
            out = forward(model, args.conv-1, f, x)
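            # spatially average the feature maps to one score per channel,
            # then treat channel f as the target "class": ascending the
            # negated cross-entropy pushes channel f's mean activation up,
            # while the L2 term keeps pixel magnitudes in check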
            criterion = nn.CrossEntropyLoss()
            filt_var = torch.autograd.Variable(torch.ones(1).long()*f).cuda()
            output = out.mean(3).mean(2)
            loss = - criterion(output, filt_var) - args.wd*torch.norm(x)**2

            # compute gradient
            loss.backward()

            # normalize gradient
            grads = x.grad.data.cpu()
            grads = grads.div(torch.norm(grads)+1e-8)

            # apply gradient: take an ascent step on the input image
            inp = inp.add(args.lr*grads)

            # periodically blur the image to damp high-frequency noise; the
            # transposes shuttle between the CHW tensor and a (W, H, C) array,
            # with sigma 0 on the channel axis so channels are left untouched
            if it % args.step == 0:
                inp = gaussian_filter(torch.squeeze(inp).numpy().transpose((2, 1, 0)),
                                       sigma=(args.sig, args.sig, 0))
                inp = torch.unsqueeze(torch.from_numpy(inp).float().transpose(2, 0), 0)

            # save image at the last iteration
            if it == args.niter - 1:
                a = deprocess_image(inp.numpy())
                Image.fromarray(a).save(fname_out)

    # run gradient ascent on every channel of the target conv layer
    for f in range(CONV[args.arch][args.conv - 1]):
        gradient_ascent(f)
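
A usage sketch, assuming the argparse flags defined elsewhere in the file
match the attribute names read off args above (the values are placeholders,
not recommendations):

    python visu/gradient_ascent.py --model model.pth --arch alexnet --conv 5 \
        --exp exp --idim 224 --niter 1000 --lr 3.0 --wd 1e-5 --step 5 --sig 0.3

One JPEG per channel is then written under exp/conv5/, named
layer5-channel<f>.jpeg.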