training/utils/preprocess_questions.py [21:168]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
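# (This excerpt starts at file line 21; the elided lines above are assumed to
#  import os, json, random, argparse, numpy as np, and imread/imresize from
#  scipy.misc, since all of those names are used below.)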
def tokenize(seq,
             delim=' ',
             punctToRemove=None,
             addStartToken=True,
             addEndToken=True):

    seq = str(seq)
    if punctToRemove is not None:
        for p in punctToRemove:
            seq = seq.replace(p, '')

    tokens = seq.split(delim)
    if addStartToken:
        tokens.insert(0, '<START>')

    if addEndToken:
        tokens.append('<END>')

    return tokens


def buildVocab(sequences,
               minTokenCount=1,
               delim=' ',
               punctToRemove=None,
               addSpecialTok=False):
    SPECIAL_TOKENS = {
        '<NULL>': 0,
        '<START>': 1,
        '<END>': 2,
        '<UNK>': 3,
    }

    tokenToCount = {}
    for seq in sequences:
        seqTokens = tokenize(
            seq,
            delim=delim,
            punctToRemove=punctToRemove,
            addStartToken=False,
            addEndToken=False)
        for token in seqTokens:
            tokenToCount[token] = tokenToCount.get(token, 0) + 1

    tokenToIdx = {}
    if addSpecialTok:
        for token, idx in SPECIAL_TOKENS.items():
            tokenToIdx[token] = idx
    for token, count in sorted(tokenToCount.items()):
        if count >= minTokenCount:
            tokenToIdx[token] = len(tokenToIdx)

    return tokenToIdx


def encode(seqTokens, tokenToIdx, allowUnk=False):
    seqIdx = []
    for token in seqTokens:
        if token not in tokenToIdx:
            if allowUnk:
                token = '<UNK>'
            else:
                raise KeyError('Token "%s" not in vocab' % token)
        seqIdx.append(tokenToIdx[token])
    return seqIdx


def decode(seqIdx, idxToToken, delim=None, stopAtEnd=True):
    tokens = []
    for idx in seqIdx:
        tokens.append(idxToToken[idx])
        if stopAtEnd and tokens[-1] == '<END>':
            break
    if delim is None:
        return tokens
    else:
        return delim.join(tokens)
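
# Illustrative round trip through the vocab helpers above (a sketch added for
# this listing, not part of the original file; the example sentence is made up):
#
#   vocab = buildVocab(['what color is the sofa'], addSpecialTok=True)
#   idxToToken = {idx: tok for tok, idx in vocab.items()}
#   tokens = tokenize('what color is the sofa')
#   # -> ['<START>', 'what', 'color', 'is', 'the', 'sofa', '<END>']
#   seqIdx = encode(tokens, vocab, allowUnk=True)
#   decode(seqIdx, idxToToken, delim=' ')
#   # -> '<START> what color is the sofa <END>'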


def preprocessImages(obj, render_dir=None):
    working_dir = os.path.join(render_dir, 'working')
    path_id = obj['path_id']
    image_paths = []
    for i in range(len(obj['pos_queue']) - 1):
        image_paths.append('%s/%s_%05d.jpg' % (working_dir, path_id, i + 1))

    image_frames = []
    for path in image_paths:
        if not os.path.isfile(path):
            print('Missing frame: %s' % path)
            return False
        # imread/imresize come from scipy.misc; both have been removed in
        # newer SciPy releases, so a pinned older SciPy is assumed here.
        img = imread(path, mode='RGB')
        img = imresize(img, (224, 224), interp='bicubic')
        img = img.transpose(2, 0, 1)  # HWC -> CHW
        img = img / 255.0  # scale to [0, 1]
        image_frames.append(img)
        # TODO: mean subtraction

    return image_frames
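

# A possible implementation of the mean subtraction flagged in the TODO above
# (an illustrative sketch, not part of the original file; the ImageNet channel
# statistics are an assumed choice, not taken from this repo):
def normalizeFrame(img):
    # img: CHW float array in [0, 1], as produced by preprocessImages.
    mean = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
    return (img - mean) / std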


def processActions(actions):
    # from shortest-path-gen format
    # 0: forward
    # 1: left
    # 2: right
    # 3: stop
    #
    # to
    # 0: null
    # 1: start
    # 2: forward
    # 3: left
    # 4: right
    # 5: stop
    # for model training
    action_translations = {0: 2, 1: 3, 2: 4, 3: 5}

    action_ids = [1]  # every sequence begins with the start action (id 1)

    for action in actions:
        action_ids.append(action_translations[action])
    return action_ids
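

# For example (illustrative, not in the original file):
#   processActions([0, 0, 1, 3])  # -> [1, 2, 2, 3, 5]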


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-input_json', required=True)
    parser.add_argument('-input_vocab', default=None)
    parser.add_argument('-output_train_h5', required=True)
    parser.add_argument('-output_val_h5', required=True)
    parser.add_argument('-output_test_h5', required=True)
    parser.add_argument('-output_data_json', required=True)
    parser.add_argument('-output_vocab', default=None)
    parser.add_argument('-num_ques', default=10000000, type=int)
    parser.add_argument('-shortest_path_dir', required=True, type=str)
    args = parser.parse_args()
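
    # Illustrative invocation (file names are placeholders, not from the repo):
    #   python preprocess_questions.py -input_json questions.json \
    #       -output_train_h5 train.h5 -output_val_h5 val.h5 \
    #       -output_test_h5 test.h5 -output_data_json data.json \
    #       -output_vocab vocab.json -shortest_path_dir shortest-paths/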

    random.seed(123)
    np.random.seed(123)

    assert args.input_vocab is not None or args.output_vocab is not None, \
        'Either input or output vocab required'

    with open(args.input_json, 'r') as f:
        data = json.load(f)

    houses = data['questions']
    questions = []
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



training/utils/preprocess_questions_pkl.py [17:164]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
(identical to the training/utils/preprocess_questions.py excerpt above)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



