in scripts/run_model.py [0:0]
def run_baseline_batch(args, model, loader, dtype):
    """Evaluate a baseline VQA model over every batch in `loader`.

    Runs `model` in eval mode, accumulates answer accuracy, and optionally
    dumps the raw scores and softmax probabilities to an HDF5 file.

    Args:
        args: namespace with attribute `output_h5` (path or None). When set,
            datasets 'scores' and 'probs' are written to that file.
        model: module called as `model(questions, feats)`, returning a
            (batch, num_answers) score tensor.
        loader: iterable yielding (questions, images, feats, answers,
            programs, program_lists) tuples; only questions, feats and
            answers are used by this baseline.
        dtype: tensor type (e.g. torch.cuda.FloatTensor) used to place the
            model and inputs on the right device.

    Returns:
        None. Results are printed; outputs optionally written to
        `args.output_h5`.
    """
    model.type(dtype)
    model.eval()
    all_scores, all_probs = [], []
    num_correct, num_samples = 0, 0
    for batch in loader:
        # images / programs / program_lists are part of the batch tuple but
        # unused by the baseline model.
        questions, _, feats, answers, _, _ = batch
        # NOTE(review): legacy (pre-0.4) PyTorch autograd API; volatile=True
        # disables gradient bookkeeping during inference.
        questions_var = Variable(questions.type(dtype).long(), volatile=True)
        feats_var = Variable(feats.type(dtype), volatile=True)
        scores = model(questions_var, feats_var)
        probs = F.softmax(scores)
        _, preds = scores.data.cpu().max(1)
        all_scores.append(scores.data.cpu().clone())
        all_probs.append(probs.data.cpu().clone())
        # int(...) keeps the accumulator a plain Python int whether .sum()
        # returns a number (old PyTorch) or a 0-dim tensor (newer PyTorch).
        num_correct += int((preds == answers).sum())
        num_samples += preds.size(0)
        print('Ran %d samples' % num_samples)
    if num_samples == 0:
        # Empty loader: bail out before the division below raises
        # ZeroDivisionError and torch.cat([]) crashes.
        print('No samples were run')
        return
    acc = float(num_correct) / num_samples
    print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
    all_scores = torch.cat(all_scores, 0)
    all_probs = torch.cat(all_probs, 0)
    if args.output_h5 is not None:
        print('Writing output to %s' % args.output_h5)
        with h5py.File(args.output_h5, 'w') as fout:
            fout.create_dataset('scores', data=all_scores.numpy())
            fout.create_dataset('probs', data=all_probs.numpy())