tensor2tensor/rl/dopamine_connector.py [111:197]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    self.env_batch_size = env_batch_size
    obs_size = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE
    state_shape = [self.env_batch_size, obs_size[0], obs_size[1],
                   dqn_agent.NATURE_DQN_STACK_SIZE]
    self.state_batch = np.zeros(state_shape)
    self.state = None  # ensure this attribute is not used
    self._observation = None  # ensure this attribute is not used
    self.reset_current_rollouts()

  def reset_current_rollouts(self):
    self._current_rollouts = [[] for _ in range(self.env_batch_size)]

  def _record_observation(self, observation_batch):
    # Set the current observation, a (batch_size x 84 x 84 x 1) batch of
    # image frames.
    observation_batch = np.array(observation_batch)
    self._observation_batch = observation_batch[:, :, :, 0]
    # Swap out the oldest frames with the current frames.
    self.state_batch = np.roll(self.state_batch, -1, axis=3)
    self.state_batch[:, :, :, -1] = self._observation_batch

  def _reset_state(self):
    self.state_batch.fill(0)

  def begin_episode(self, observation):
    self._reset_state()
    self._record_observation(observation)

    if not self.eval_mode:
      self._train_step()

    self.action = self._select_action()
    return self.action

  def _update_current_rollouts(self, last_observation, action, reward,
                               are_terminal):
    transitions = zip(last_observation, action, reward, are_terminal)
    for transition, rollout in zip(transitions, self._current_rollouts):
      rollout.append(transition)

  def _store_current_rollouts(self):
    for rollout in self._current_rollouts:
      for transition in rollout:
        self._store_transition(*transition)
    self.reset_current_rollouts()

  def step(self, reward, observation):
    self._last_observation = self._observation_batch
    self._record_observation(observation)

    if not self.eval_mode:
      self._update_current_rollouts(self._last_observation, self.action, reward,
                                    [False] * self.env_batch_size)
      # Keep the train_step:env_step ratio independent of the batch size.
      for _ in range(self.env_batch_size):
        self._train_step()

    self.action = self._select_action()
    return self.action

  def end_episode(self, reward):
    if not self.eval_mode:
      self._update_current_rollouts(
          self._observation_batch, self.action, reward,
          [True] * self.env_batch_size)
      self._store_current_rollouts()

  def _select_action(self):
    epsilon = self.epsilon_eval
    if not self.eval_mode:
      epsilon = self.epsilon_fn(
          self.epsilon_decay_period,
          self.training_steps,
          self.min_replay_history,
          self.epsilon_train)

    def choose_action(ix):
      if random.random() <= epsilon:
        # Choose a random action with probability epsilon.
        return random.randint(0, self.num_actions - 1)
      else:
        # Choose the action with highest Q-value at the current state.
        return self._sess.run(self._q_argmax,
                              {self.state_ph: self.state_batch[ix:ix+1]})

    return np.array([choose_action(ix) for ix in range(self.env_batch_size)])
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
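
A note on the frame stacking above: _record_observation keeps the most recent
NATURE_DQN_STACK_SIZE grayscale frames per environment by rolling the stack
axis and overwriting the last slot. The sketch below reproduces that update
with plain NumPy; the shapes (batch of 2, 84x84 observations, stack of 4) are
illustrative assumptions, not values taken from the snippet.

    import numpy as np

    batch_size, obs_hw, stack = 2, 84, 4
    state_batch = np.zeros((batch_size, obs_hw, obs_hw, stack))

    def record_observation(state_batch, observation_batch):
      # observation_batch: (batch_size, 84, 84, 1) frames, as in the agent above.
      frames = np.array(observation_batch)[:, :, :, 0]
      # Shift the stack so the oldest frame falls out of the last slot...
      state_batch = np.roll(state_batch, -1, axis=3)
      # ...and write the newest frame into the freed slot.
      state_batch[:, :, :, -1] = frames
      return state_batch

    new_frames = np.random.randint(0, 255, size=(batch_size, obs_hw, obs_hw, 1))
    state_batch = record_observation(state_batch, new_frames)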



tensor2tensor/rl/dopamine_connector.py [304:390]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    self.env_batch_size = env_batch_size
    obs_size = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE
    state_shape = [self.env_batch_size, obs_size[0], obs_size[1],
                   dqn_agent.NATURE_DQN_STACK_SIZE]
    self.state_batch = np.zeros(state_shape)
    self.state = None  # ensure this attribute is not used
    self._observation = None  # ensure this attribute is not used
    self.reset_current_rollouts()

  def reset_current_rollouts(self):
    self._current_rollouts = [[] for _ in range(self.env_batch_size)]

  def _record_observation(self, observation_batch):
    # Set the current observation, a (batch_size x 84 x 84 x 1) batch of
    # image frames.
    observation_batch = np.array(observation_batch)
    self._observation_batch = observation_batch[:, :, :, 0]
    # Swap out the oldest frames with the current frames.
    self.state_batch = np.roll(self.state_batch, -1, axis=3)
    self.state_batch[:, :, :, -1] = self._observation_batch

  def _reset_state(self):
    self.state_batch.fill(0)

  def begin_episode(self, observation):
    self._reset_state()
    self._record_observation(observation)

    if not self.eval_mode:
      self._train_step()

    self.action = self._select_action()
    return self.action

  def _update_current_rollouts(self, last_observation, action, reward,
                               are_terminal):
    transitions = zip(last_observation, action, reward, are_terminal)
    for transition, rollout in zip(transitions, self._current_rollouts):
      rollout.append(transition)

  def _store_current_rollouts(self):
    for rollout in self._current_rollouts:
      for transition in rollout:
        self._store_transition(*transition)
    self.reset_current_rollouts()

  def step(self, reward, observation):
    self._last_observation = self._observation_batch
    self._record_observation(observation)

    if not self.eval_mode:
      self._update_current_rollouts(self._last_observation, self.action, reward,
                                    [False] * self.env_batch_size)
      # Keep the train_step:env_step ratio independent of the batch size.
      for _ in range(self.env_batch_size):
        self._train_step()

    self.action = self._select_action()
    return self.action

  def end_episode(self, reward):
    if not self.eval_mode:
      self._update_current_rollouts(
          self._observation_batch, self.action, reward,
          [True] * self.env_batch_size)
      self._store_current_rollouts()

  def _select_action(self):
    epsilon = self.epsilon_eval
    if not self.eval_mode:
      epsilon = self.epsilon_fn(
          self.epsilon_decay_period,
          self.training_steps,
          self.min_replay_history,
          self.epsilon_train)

    def choose_action(ix):
      if random.random() <= epsilon:
        # Choose a random action with probability epsilon.
        return random.randint(0, self.num_actions - 1)
      else:
        # Choose the action with highest Q-value at the current state.
        return self._sess.run(self._q_argmax,
                              {self.state_ph: self.state_batch[ix:ix+1]})

    return np.array([choose_action(ix) for ix in range(self.env_batch_size)])
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
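
For reference, the epsilon-greedy choice in _select_action is made
independently for every environment in the batch: each index draws its own
random number against the shared epsilon and, when exploiting, evaluates the
Q-network on its own stacked state. A minimal sketch of that per-environment
loop, with q_values_fn standing in for the Q-network evaluation behind
self._q_argmax (an assumption for illustration, not the connector's API):

    import random
    import numpy as np

    def select_actions(state_batch, q_values_fn, num_actions, epsilon):
      actions = []
      for ix in range(state_batch.shape[0]):
        if random.random() <= epsilon:
          # Explore: uniform random action for this environment.
          actions.append(random.randint(0, num_actions - 1))
        else:
          # Exploit: greedy action w.r.t. this environment's Q-values.
          actions.append(int(np.argmax(q_values_fn(state_batch[ix:ix + 1]))))
      return np.array(actions)

    # Example with a dummy Q-function over 6 actions.
    dummy_q = lambda s: np.random.rand(1, 6)
    actions = select_actions(np.zeros((2, 84, 84, 4)), dummy_q,
                             num_actions=6, epsilon=0.1)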



