training/utils/preprocess_questions.py [239:365]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Copy the i-th variable-length action-label sequence into row i of the
        # fixed-width (pre-allocated) matrix; `i` comes from an enclosing loop
        # that starts above this excerpt.
        for j in range(len(action_labels[i])):
            action_labels_mat[i][j] = action_labels[i][j]

    # pad encoded questions
    # In-place right-pad every encoded question with the <NULL> token id so
    # all rows share the length of the longest question.
    maxQLength = max(len(x) for x in encoded_questions)
    for qe in encoded_questions:
        while len(qe) < maxQLength:
            qe.append(vocab['questionTokenToIdx']['<NULL>'])

    # make train/test splits
    # NOTE(review): random.shuffle is not seeded here — splits differ between
    # runs unless the caller seeds `random` upstream; TODO confirm.
    inds = list(range(0, len(idx)))
    random.shuffle(inds)

    # Env-name lists that define each split (these names are rebound to
    # per-sample lists further down — see NOTE below).
    train_envs = data['splits']['train']
    val_envs = data['splits']['val']
    test_envs = data['splits']['test']

    # Sanity check: splits must be disjoint at the environment level.
    # NOTE(review): `assert` is stripped under `python -O`; `any(...) == False`
    # would be clearer as `not any(...)` / raising ValueError.
    assert any([x in train_envs for x in test_envs]) == False
    assert any([x in train_envs for x in val_envs]) == False

    # Partition shuffled sample indices by which split their env belongs to.
    train_inds = [i for i in inds if envs[i] in train_envs]
    val_inds = [i for i in inds if envs[i] in val_envs]
    test_inds = [i for i in inds if envs[i] in test_envs]

    # TRAIN
    train_idx = [idx[i] for i in train_inds]
    train_encoded_questions = [encoded_questions[i] for i in train_inds]
    # NOTE(review): *_question_types are built for all three splits but never
    # written to the h5 or json outputs below — apparently dead; confirm.
    train_question_types = [question_types[i] for i in train_inds]
    train_answers = [answers[i] for i in train_inds]
    # NOTE(review): rebinds train_envs from the split-defining env-name list to
    # a per-sample env list (one entry per training example, with duplicates).
    train_envs = [envs[i] for i in train_inds]
    train_pos_queue = [pos_queue[i] for i in train_inds]
    train_boxes = [boxes[i] for i in train_inds]

    # Fancy indexing — action_labels_mat must be a numpy array here.
    train_action_labels = action_labels_mat[train_inds]
    train_action_lengths = [action_lengths[i] for i in train_inds]

    # VAL
    val_idx = [idx[i] for i in val_inds]
    val_encoded_questions = [encoded_questions[i] for i in val_inds]
    val_question_types = [question_types[i] for i in val_inds]
    val_answers = [answers[i] for i in val_inds]
    val_envs = [envs[i] for i in val_inds]
    val_pos_queue = [pos_queue[i] for i in val_inds]
    val_boxes = [boxes[i] for i in val_inds]

    val_action_labels = action_labels_mat[val_inds]
    val_action_lengths = [action_lengths[i] for i in val_inds]

    # TEST
    test_idx = [idx[i] for i in test_inds]
    test_encoded_questions = [encoded_questions[i] for i in test_inds]
    test_question_types = [question_types[i] for i in test_inds]
    test_answers = [answers[i] for i in test_inds]
    test_envs = [envs[i] for i in test_inds]
    test_pos_queue = [pos_queue[i] for i in test_inds]
    test_boxes = [boxes[i] for i in test_inds]

    test_action_labels = action_labels_mat[test_inds]
    test_action_lengths = [action_lengths[i] for i in test_inds]

    # parse envs
    # Map each per-sample env name to its index in the deduplicated env list.
    # NOTE(review): set iteration order is not stable across processes (string
    # hash randomization), so all_envs / *_env_idx can differ between runs;
    # also .index() inside a comprehension is O(n*m) — a name->index dict
    # built once would be O(n+m). Flagging only; fix belongs with callers.
    all_envs = list(set(envs))
    train_env_idx = [all_envs.index(x) for x in train_envs]
    val_env_idx = [all_envs.index(x) for x in val_envs]
    test_env_idx = [all_envs.index(x) for x in test_envs]

    # write h5 files
    print('Writing hdf5')

    # Questions are stored as int16 — token ids must fit in int16; presumably
    # the vocabulary is small enough. TODO confirm.
    train_encoded_questions = np.asarray(
        train_encoded_questions, dtype=np.int16)
    print('Train', train_encoded_questions.shape)
    with h5py.File(args.output_train_h5, 'w') as f:
        f.create_dataset('idx', data=np.asarray(train_idx))
        f.create_dataset('questions', data=train_encoded_questions)
        f.create_dataset('answers', data=np.asarray(train_answers))
        # NOTE(review): train_action_labels is already an ndarray (fancy-index
        # result), so np.asarray is a no-op here.
        f.create_dataset(
            'action_labels',
            data=np.asarray(train_action_labels),
            dtype=np.int16)
        f.create_dataset(
            'action_lengths',
            data=np.asarray(train_action_lengths),
            dtype=np.int16)

    val_encoded_questions = np.asarray(val_encoded_questions, dtype=np.int16)
    print('Val', val_encoded_questions.shape)
    with h5py.File(args.output_val_h5, 'w') as f:
        f.create_dataset('idx', data=np.asarray(val_idx))
        f.create_dataset('questions', data=val_encoded_questions)
        f.create_dataset('answers', data=np.asarray(val_answers))
        f.create_dataset(
            'action_labels',
            data=np.asarray(val_action_labels),
            dtype=np.int16)
        f.create_dataset(
            'action_lengths',
            data=np.asarray(val_action_lengths),
            dtype=np.int16)

    test_encoded_questions = np.asarray(test_encoded_questions, dtype=np.int16)
    print('Test', test_encoded_questions.shape)
    with h5py.File(args.output_test_h5, 'w') as f:
        f.create_dataset('idx', data=np.asarray(test_idx))
        f.create_dataset('questions', data=test_encoded_questions)
        f.create_dataset('answers', data=np.asarray(test_answers))
        f.create_dataset(
            'action_labels',
            data=np.asarray(test_action_labels),
            dtype=np.int16)
        f.create_dataset(
            'action_lengths',
            data=np.asarray(test_action_lengths),
            dtype=np.int16)

    # Dump env index mappings plus position queues / boxes per split.
    # NOTE(review): open() without a `with` relies on GC to flush/close the
    # handle — prefer `with open(args.output_data_json, 'w') as fp:`.
    json.dump({
        'envs': all_envs,
        'train_env_idx': train_env_idx,
        'val_env_idx': val_env_idx,
        'test_env_idx': test_env_idx,
        'train_pos_queue': train_pos_queue,
        'val_pos_queue': val_pos_queue,
        'test_pos_queue': test_pos_queue,
        'train_boxes': train_boxes,
        'val_boxes': val_boxes,
        'test_boxes': test_boxes
    }, open(args.output_data_json, 'w'))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



training/utils/preprocess_questions_pkl.py [241:367]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Copy the i-th variable-length action-label sequence into row i of the
        # fixed-width (pre-allocated) matrix; `i` comes from an enclosing loop
        # that starts above this excerpt. (This whole fragment duplicates
        # preprocess_questions.py — candidates for a shared helper.)
        for j in range(len(action_labels[i])):
            action_labels_mat[i][j] = action_labels[i][j]

    # pad encoded questions
    # In-place right-pad every encoded question with the <NULL> token id so
    # all rows share the length of the longest question.
    maxQLength = max(len(x) for x in encoded_questions)
    for qe in encoded_questions:
        while len(qe) < maxQLength:
            qe.append(vocab['questionTokenToIdx']['<NULL>'])

    # make train/test splits
    # NOTE(review): random.shuffle is not seeded here — splits differ between
    # runs unless the caller seeds `random` upstream; TODO confirm.
    inds = list(range(0, len(idx)))
    random.shuffle(inds)

    # Env-name lists that define each split (these names are rebound to
    # per-sample lists further down — see NOTE below).
    train_envs = data['splits']['train']
    val_envs = data['splits']['val']
    test_envs = data['splits']['test']

    # Sanity check: splits must be disjoint at the environment level.
    # NOTE(review): `assert` is stripped under `python -O`; `any(...) == False`
    # would be clearer as `not any(...)` / raising ValueError.
    assert any([x in train_envs for x in test_envs]) == False
    assert any([x in train_envs for x in val_envs]) == False

    # Partition shuffled sample indices by which split their env belongs to.
    train_inds = [i for i in inds if envs[i] in train_envs]
    val_inds = [i for i in inds if envs[i] in val_envs]
    test_inds = [i for i in inds if envs[i] in test_envs]

    # TRAIN
    train_idx = [idx[i] for i in train_inds]
    train_encoded_questions = [encoded_questions[i] for i in train_inds]
    # NOTE(review): *_question_types are built for all three splits but never
    # written to the h5 or json outputs below — apparently dead; confirm.
    train_question_types = [question_types[i] for i in train_inds]
    train_answers = [answers[i] for i in train_inds]
    # NOTE(review): rebinds train_envs from the split-defining env-name list to
    # a per-sample env list (one entry per training example, with duplicates).
    train_envs = [envs[i] for i in train_inds]
    train_pos_queue = [pos_queue[i] for i in train_inds]
    train_boxes = [boxes[i] for i in train_inds]

    # Fancy indexing — action_labels_mat must be a numpy array here.
    train_action_labels = action_labels_mat[train_inds]
    train_action_lengths = [action_lengths[i] for i in train_inds]

    # VAL
    val_idx = [idx[i] for i in val_inds]
    val_encoded_questions = [encoded_questions[i] for i in val_inds]
    val_question_types = [question_types[i] for i in val_inds]
    val_answers = [answers[i] for i in val_inds]
    val_envs = [envs[i] for i in val_inds]
    val_pos_queue = [pos_queue[i] for i in val_inds]
    val_boxes = [boxes[i] for i in val_inds]

    val_action_labels = action_labels_mat[val_inds]
    val_action_lengths = [action_lengths[i] for i in val_inds]

    # TEST
    test_idx = [idx[i] for i in test_inds]
    test_encoded_questions = [encoded_questions[i] for i in test_inds]
    test_question_types = [question_types[i] for i in test_inds]
    test_answers = [answers[i] for i in test_inds]
    test_envs = [envs[i] for i in test_inds]
    test_pos_queue = [pos_queue[i] for i in test_inds]
    test_boxes = [boxes[i] for i in test_inds]

    test_action_labels = action_labels_mat[test_inds]
    test_action_lengths = [action_lengths[i] for i in test_inds]

    # parse envs
    # Map each per-sample env name to its index in the deduplicated env list.
    # NOTE(review): set iteration order is not stable across processes (string
    # hash randomization), so all_envs / *_env_idx can differ between runs;
    # also .index() inside a comprehension is O(n*m) — a name->index dict
    # built once would be O(n+m). Flagging only; fix belongs with callers.
    all_envs = list(set(envs))
    train_env_idx = [all_envs.index(x) for x in train_envs]
    val_env_idx = [all_envs.index(x) for x in val_envs]
    test_env_idx = [all_envs.index(x) for x in test_envs]

    # write h5 files
    print('Writing hdf5')

    # Questions are stored as int16 — token ids must fit in int16; presumably
    # the vocabulary is small enough. TODO confirm.
    train_encoded_questions = np.asarray(
        train_encoded_questions, dtype=np.int16)
    print('Train', train_encoded_questions.shape)
    with h5py.File(args.output_train_h5, 'w') as f:
        f.create_dataset('idx', data=np.asarray(train_idx))
        f.create_dataset('questions', data=train_encoded_questions)
        f.create_dataset('answers', data=np.asarray(train_answers))
        # NOTE(review): train_action_labels is already an ndarray (fancy-index
        # result), so np.asarray is a no-op here.
        f.create_dataset(
            'action_labels',
            data=np.asarray(train_action_labels),
            dtype=np.int16)
        f.create_dataset(
            'action_lengths',
            data=np.asarray(train_action_lengths),
            dtype=np.int16)

    val_encoded_questions = np.asarray(val_encoded_questions, dtype=np.int16)
    print('Val', val_encoded_questions.shape)
    with h5py.File(args.output_val_h5, 'w') as f:
        f.create_dataset('idx', data=np.asarray(val_idx))
        f.create_dataset('questions', data=val_encoded_questions)
        f.create_dataset('answers', data=np.asarray(val_answers))
        f.create_dataset(
            'action_labels',
            data=np.asarray(val_action_labels),
            dtype=np.int16)
        f.create_dataset(
            'action_lengths',
            data=np.asarray(val_action_lengths),
            dtype=np.int16)

    test_encoded_questions = np.asarray(test_encoded_questions, dtype=np.int16)
    print('Test', test_encoded_questions.shape)
    with h5py.File(args.output_test_h5, 'w') as f:
        f.create_dataset('idx', data=np.asarray(test_idx))
        f.create_dataset('questions', data=test_encoded_questions)
        f.create_dataset('answers', data=np.asarray(test_answers))
        f.create_dataset(
            'action_labels',
            data=np.asarray(test_action_labels),
            dtype=np.int16)
        f.create_dataset(
            'action_lengths',
            data=np.asarray(test_action_lengths),
            dtype=np.int16)

    # Dump env index mappings plus position queues / boxes per split.
    # NOTE(review): open() without a `with` relies on GC to flush/close the
    # handle — prefer `with open(args.output_data_json, 'w') as fp:`.
    json.dump({
        'envs': all_envs,
        'train_env_idx': train_env_idx,
        'val_env_idx': val_env_idx,
        'test_env_idx': test_env_idx,
        'train_pos_queue': train_pos_queue,
        'val_pos_queue': val_pos_queue,
        'test_pos_queue': test_pos_queue,
        'train_boxes': train_boxes,
        'val_boxes': val_boxes,
        'test_boxes': test_boxes
    }, open(args.output_data_json, 'w'))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



