question_generation_model.py [383:405]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Build the model inputs for each candidate partial sequence; the
            # input format depends on which embedding the dataset was built with.
            for s in start_word:
                if self.datasets.use_keyword:
                    # Keyword-conditioned model: padded word indices plus the keyword input.
                    sequence = pad_sequences([s[0]], maxlen=max_seq_len, padding='post')
                    preds = model.predict([image_input, sequence, keyword])
                elif 'glove' in self.datasets.embedding_file:
                    # GloVe: the model consumes padded word-index sequences directly.
                    sequence = pad_sequences([s[0]], maxlen=max_seq_len, padding='post')
                    preds = model.predict([image_input, sequence])
                elif 'elmo' in self.datasets.embedding_file:
                    # ELMo: decode the indices back to text, clean it, and duplicate it
                    # into a two-element batch of raw strings.
                    sequence = ' '.join([self.datasets.idx_to_word[idx] for idx in s[0]])
                    sequence = self.cleanText(sequence)
                    sequence = np.array([sequence, sequence])
                    preds = model.predict([image_input, sequence])
                elif 'bert' in self.datasets.embedding_file:
                    # BERT: decode the indices (dropping the start token), clean the text,
                    # and run the BERT preprocessing to get ids, masks and segment ids.
                    sequence = ' '.join([self.datasets.idx_to_word[idx] for idx in s[0][1:]])
                    sequence = self.cleanText(sequence)
                    sequence = [[sequence], [sequence]]
                    input_ids, input_masks, segment_ids, _ = preprocess_bert_input(sequence, [None] * len(sequence),
                                                                                   self.datasets.max_question_len,
                                                                                   self.tokenizer, self.vocab_size)
                    preds = model.predict([image_input, input_ids, input_masks, segment_ids])
                else:
                    raise ValueError('Unsupported embedding file: %s' % self.datasets.embedding_file)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
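
A minimal standalone sketch of the three input formats the branch above produces. The example values (max_seq_len, idx_to_word, partial_indices) are made up for illustration; the real code takes them from self.datasets and the surrounding method, and additionally applies self.cleanText to the decoded strings.

    import numpy as np
    from tensorflow.keras.preprocessing.sequence import pad_sequences

    # Hypothetical stand-ins for the values used in the excerpt above.
    max_seq_len = 20
    idx_to_word = {1: '<start>', 2: 'what', 3: 'is'}
    partial_indices = [1, 2, 3]   # plays the role of s[0] in the loop

    # GloVe / keyword branches: one padded row of word indices, shape (1, max_seq_len).
    glove_input = pad_sequences([partial_indices], maxlen=max_seq_len, padding='post')

    # ELMo branch: indices decoded back to text and duplicated into a two-string
    # batch, since that model consumes raw strings rather than indices.
    text = ' '.join(idx_to_word[i] for i in partial_indices)
    elmo_input = np.array([text, text])

    # BERT branch: decoded text (start token dropped) wrapped as nested lists,
    # ready for preprocess_bert_input to turn into ids, masks and segment ids.
    bert_text = ' '.join(idx_to_word[i] for i in partial_indices[1:])
    bert_input = [[bert_text], [bert_text]]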



question_generation_model.py [545:567]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            for s in start_word:
                # self.logger.info('Start word Tuple %s' % s)
                # Same embedding-specific input dispatch as in the excerpt above.
                if self.datasets.use_keyword:
                    sequence = pad_sequences([s[0]], maxlen=max_seq_len, padding='post')
                    preds = model.predict([image_input, sequence, keyword])
                elif 'glove' in self.datasets.embedding_file:
                    sequence = pad_sequences([s[0]], maxlen=max_seq_len, padding='post')
                    preds = model.predict([image_input, sequence])
                elif 'elmo' in self.datasets.embedding_file:
                    sequence = ' '.join([self.datasets.idx_to_word[idx] for idx in s[0]])
                    sequence = self.cleanText(sequence)
                    sequence = np.array([sequence, sequence])
                    preds = model.predict([image_input, sequence])
                elif 'bert' in self.datasets.embedding_file:
                    sequence = ' '.join([self.datasets.idx_to_word[idx] for idx in s[0][1:]])
                    sequence = self.cleanText(sequence)
                    sequence = [[sequence], [sequence]]
                    input_ids, input_masks, segment_ids, _ = preprocess_bert_input(sequence, [None] * len(sequence),
                                                                                   self.datasets.max_question_len,
                                                                                   self.tokenizer, self.vocab_size)
                    preds = model.predict([image_input, input_ids, input_masks, segment_ids])
                else:
                    raise ValueError('Unsupported embedding file: %s' % self.datasets.embedding_file)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
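
Both excerpts contain the same if/elif chain, so the duplication could be folded into a single helper on the model class. The sketch below is an assumption about how that might look, not code from the repository; it presumes the same module-level imports (numpy, pad_sequences, preprocess_bert_input) and instance attributes (self.datasets, self.tokenizer, self.vocab_size, self.cleanText) as question_generation_model.py, and the method name is made up.

    def _predict_for_partial_sequence(self, model, image_input, s, max_seq_len, keyword=None):
        """Hypothetical helper: build the embedding-specific inputs for one
        partial sequence and run the model, replacing the duplicated chain."""
        if self.datasets.use_keyword:
            sequence = pad_sequences([s[0]], maxlen=max_seq_len, padding='post')
            return model.predict([image_input, sequence, keyword])
        if 'glove' in self.datasets.embedding_file:
            sequence = pad_sequences([s[0]], maxlen=max_seq_len, padding='post')
            return model.predict([image_input, sequence])
        if 'elmo' in self.datasets.embedding_file:
            text = self.cleanText(' '.join(self.datasets.idx_to_word[idx] for idx in s[0]))
            return model.predict([image_input, np.array([text, text])])
        if 'bert' in self.datasets.embedding_file:
            text = self.cleanText(' '.join(self.datasets.idx_to_word[idx] for idx in s[0][1:]))
            batch = [[text], [text]]
            input_ids, input_masks, segment_ids, _ = preprocess_bert_input(
                batch, [None] * len(batch), self.datasets.max_question_len,
                self.tokenizer, self.vocab_size)
            return model.predict([image_input, input_ids, input_masks, segment_ids])
        raise ValueError('Unsupported embedding file: %s' % self.datasets.embedding_file)

With such a helper, each duplicated loop body would reduce to a single call, e.g. preds = self._predict_for_partial_sequence(model, image_input, s, max_seq_len, keyword).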



