benchmark/scripts/models/lstm_synthetic.py [67:98]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        optimizer = RMSprop(lr=0.01)

        if use_dataset_tensors:
            # Create the dataset and its associated one-shot iterator.
            dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
            dataset = dataset.repeat()
            dataset = dataset.shuffle(10000)
            dataset = dataset.batch(self.batch_size)
            iterator = dataset.make_one_shot_iterator()

            # Model creation using tensors from the get_next() graph node.
            # The final Dense layer emits logits; the softmax is applied by
            # the crossentropy_from_logits loss used at compile time below.
            inputs, targets = iterator.get_next()
            input_tensor = keras.layers.Input(tensor=inputs)
            model.add(Dense(input_dim_2))
            predictions = model(input_tensor)
            model = keras.models.Model(input_tensor, predictions)
        else:
            model.add(Dense(input_dim_2, activation='softmax'))

        # Use multi_gpu_model when more than one GPU is available.
        if (keras.backend.backend() == 'tensorflow' or
                keras.backend.backend() == 'mxnet') and gpus > 1:
            model = multi_gpu_model(model, gpus=gpus, cpu_merge=False)

        if use_dataset_tensors:
            model.compile(loss=crossentropy_from_logits,
                          optimizer=optimizer,
                          metrics=['accuracy'],
                          target_tensors=[targets])
        else:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
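
For context, the dataset-tensor branch above wires a tf.data pipeline straight into the
Keras model: the iterator's feature tensor enters through Input(tensor=...) and the label
tensor is attached at compile time via target_tensors. Below is a minimal, self-contained
sketch of that pattern, assuming TensorFlow 1.x with standalone Keras (the era of
make_one_shot_iterator and target_tensors); the toy shapes, the class count, and the
inline logits loss are illustrative assumptions, not code from the benchmark scripts.

    import numpy as np
    import tensorflow as tf
    import keras
    from keras import backend as K
    from keras.layers import Dense, Input
    from keras.models import Model
    from keras.optimizers import RMSprop

    # Toy stand-ins for (x_train, y_train); shapes and class count are illustrative.
    num_classes = 10
    batch_size = 32
    x_train = np.random.random((1000, 20)).astype('float32')
    y_train = keras.utils.to_categorical(
        np.random.randint(num_classes, size=(1000,)), num_classes)

    # Same input pipeline as the benchmark: slice, repeat, shuffle, batch, one-shot iterator.
    dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    dataset = dataset.repeat()
    dataset = dataset.shuffle(10000)
    dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    inputs, targets = iterator.get_next()

    # Feed the feature tensor in through Input(tensor=...); the label tensor is
    # bound at compile time via target_tensors, so fit() needs no x/y arrays.
    input_tensor = Input(tensor=inputs)
    predictions = Dense(num_classes)(input_tensor)  # raw logits, no softmax
    model = Model(input_tensor, predictions)

    def logits_crossentropy(y_true, y_pred):
        # Stand-in for the benchmarks' crossentropy_from_logits helper, which is
        # defined outside this excerpt; this is an assumed, equivalent definition.
        return K.categorical_crossentropy(target=y_true, output=y_pred,
                                          from_logits=True)

    model.compile(loss=logits_crossentropy,
                  optimizer=RMSprop(lr=0.01),
                  metrics=['accuracy'],
                  target_tensors=[targets])
    model.fit(epochs=1, steps_per_epoch=len(x_train) // batch_size)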



benchmark/scripts/models/lstm_text_generation.py [91:122]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        optimizer = RMSprop(lr=0.01)

        if use_dataset_tensors:
            # Create the dataset and its associated one-shot iterator.
            dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
            dataset = dataset.repeat()
            dataset = dataset.shuffle(10000)
            dataset = dataset.batch(self.batch_size)
            iterator = dataset.make_one_shot_iterator()

            # Model creation using tensors from the get_next() graph node.
            # The final Dense layer emits logits; the softmax is applied by
            # the crossentropy_from_logits loss used at compile time below.
            inputs, targets = iterator.get_next()
            input_tensor = keras.layers.Input(tensor=inputs)
            model.add(Dense(input_dim_2))
            predictions = model(input_tensor)
            model = keras.models.Model(input_tensor, predictions)
        else:
            model.add(Dense(input_dim_2, activation='softmax'))

        # Use multi_gpu_model when more than one GPU is available.
        if (keras.backend.backend() == 'tensorflow' or
                keras.backend.backend() == 'mxnet') and gpus > 1:
            model = multi_gpu_model(model, gpus=gpus, cpu_merge=False)

        if use_dataset_tensors:
            model.compile(loss=crossentropy_from_logits,
                          optimizer=optimizer,
                          metrics=['accuracy'],
                          target_tensors=[targets])
        else:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
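
When use_dataset_tensors is false, both scripts instead add a softmax Dense layer and train
on in-memory NumPy arrays; the excerpts are cut off before that compile call, so the
following is only an illustrative sketch of that conventional Keras setup, with toy data
and a plain categorical cross-entropy loss, not the benchmarks' actual code.

    import numpy as np
    import keras
    from keras.layers import Dense
    from keras.models import Sequential
    from keras.optimizers import RMSprop

    # Toy data; shapes and class count are illustrative only.
    num_classes = 10
    x_train = np.random.random((1000, 20)).astype('float32')
    y_train = keras.utils.to_categorical(
        np.random.randint(num_classes, size=(1000,)), num_classes)

    # With softmax on the last layer, the standard categorical_crossentropy applies
    # directly, and no target_tensors are needed at compile time.
    model = Sequential()
    model.add(Dense(64, activation='relu', input_shape=(20,)))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=0.01),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=32, epochs=1)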



