exp_vd/eval_sl.py [87:126]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
support.pretty_print_dict(args)

# Data files
root = args['data_root']
imdb_path_val = os.path.join(root, 'imdb_%s.npy' % args['test_split'])
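# e.g., root/imdb_val.npy when args['test_split'] == 'val'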

# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
caption_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'cap': caption_assembler}
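# (an Assembler turns a decoded program token sequence into an executable
# module layout; questions and captions get separate instances)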

# dataloader for the evaluation split: fixed order, a single pass over the
# data, and fetch the candidate answer options needed for ranking
input_dict = {'path': imdb_path_val, 'shuffle': False, 'one_pass': True,
              'args': args, 'assembler': question_assembler,
              'fetch_options': True}
val_loader = DataReader(input_dict)

# parameters for evaluation, derived from the training arguments
eval_params = args.copy()
eval_params['use_gt_prog'] = False  # predict programs; no ground truth at test time
eval_params['enc_dropout'] = False  # disable encoder dropout
eval_params['dec_dropout'] = False  # disable decoder dropout
eval_params['dec_sampling'] = False  # do not sample, take argmax

# num_rounds was added to args for models trained later; when a checkpoint
# lacks it, fall back to the dataloader's value (dialog rounds per image)
if 'num_rounds' not in eval_params:
  eval_params['num_rounds'] = val_loader.batch_loader.num_rounds

# model in evaluation mode, with separate assemblers for questions and captions
model = CorefNMN(eval_params, assemblers)

# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None)  # keep all snapshots
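# `sess` is the tf.Session created earlier in the file (before this excerpt);
# restore() loads every saved variable from the checkpoint into it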
snapshot_saver.restore(sess, args['checkpoint'])

print('Evaluating on %s' % args['test_split'])
ranks = []
matches = []
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
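The excerpt stops just before the evaluation loop that fills `ranks` with the
rank of the ground-truth answer among the 100 VisDial candidates for each
question. For reference, a minimal sketch of how such ranks are typically
reduced to retrieval metrics (numpy only; `compute_visdial_metrics` is an
illustrative helper, not part of this repository):

import numpy as np

def compute_visdial_metrics(ranks):
  """Reduce 1-indexed ground-truth answer ranks to retrieval metrics."""
  ranks = np.asarray(ranks, dtype=np.float32)
  return {'mean_rank': float(ranks.mean()),
          'mrr': float((1.0 / ranks).mean()),  # mean reciprocal rank
          'r@1': float((ranks <= 1).mean()),   # recall at 1/5/10
          'r@5': float((ranks <= 5).mean()),
          'r@10': float((ranks <= 10).mean())}

# e.g. compute_visdial_metrics([1, 3, 12]) -> mrr ~ 0.472, r@5 = 2/3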



exp_vd/visualize_sl.py [87:126]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
(identical to the exp_vd/eval_sl.py [87:126] snippet above)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



