rlalgos/dqn/duelling_dqn.py [485:556]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            self.evaluate()
        self.logger.update_csv()  # To save as a CSV file in logdir

        trajectories, n = self.train_batcher.get()  # To collect the last training trajectories before closing

        self.train_batcher.close()
        self.evaluation_batcher.get()  # To wait for the last trajectories
        self.evaluation_batcher.close()
        self.logger.close()

    def soft_update_params(self, net, target_net, tau):
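        # Polyak averaging: theta_target <- tau * theta + (1 - tau) * theta_target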
        for param, target_param in zip(net.parameters(), target_net.parameters()):
            target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)

    def evaluate(self, relaunch=True):
        # Non-blocking read: the evaluation batcher returns None while the
        # evaluation episodes are still running.
        evaluation_trajectories, n = self.evaluation_batcher.get(blocking=False)

        if evaluation_trajectories is None:
            return
        # Average evaluation reward: masked sum of rewards over time,
        # then mean over the evaluation episodes.
        avg_reward = (
            (
                evaluation_trajectories.trajectories["_observation/reward"]
                * evaluation_trajectories.trajectories.mask()
            )
            .sum(1)
            .mean()
            .item()
        )
        self.logger.add_scalar("avg_reward", avg_reward, self.iteration)
        if self.config["verbose"]:
            print(
                f"Iteration {self.iteration}, Reward = {avg_reward}, "
                f"Buffer size = {self.replay_buffer.size()}"
            )

        if relaunch:
            # Push the latest learning weights to the evaluation workers, then
            # restart evaluation episodes with epsilon = 0 (greedy evaluation).
            self.evaluation_batcher.update(
                self._state_dict(self.learning_model, torch.device("cpu"))
            )
            n_episodes = (
                self.config["n_evaluation_envs"] * self.config["n_evaluation_processes"]
            )
            self.evaluation_batcher.reset(
                agent_info=DictTensor({"epsilon": torch.zeros(n_episodes).float()})
            )
            self.evaluation_batcher.execute()
        return avg_reward

    def get_loss(self, transitions, device):
        transitions = transitions.to(device)
        B = transitions.n_elems()
        Bv = torch.arange(B).to(device)
        # Fields prefixed with "_" refer to the post-transition (next) observation.
        action = transitions["action/action"]
        reward = transitions["_observation/reward"]
        frame = transitions["observation/frame"]
        _frame = transitions["_observation/frame"]
        _done = transitions["_observation/done"].float()

        # Q-values of the actions actually taken, under the online (learning) network.
        q = self.learning_model(frame)
        qa = q[Bv, action]

        # Next-observation Q-values under the target network (gradients detached).
        # qp = self.learning_model(_frame).detach()
        _q_target = self.target_model(_frame).detach()
        _q_target_a = None
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
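
The excerpt stops at _q_target_a = None, just before the bootstrapped target is built. For orientation only, a standard double-DQN continuation using the tensors already defined in get_loss would look roughly like the sketch below; this is an illustrative reconstruction, not the file's actual code, and the "discount_factor" config key is an assumption.

    # Hedged sketch (not the original source): double-DQN target computation.
    # The online network selects the next action; the target network scores it.
    _q_online = self.learning_model(_frame).detach()
    _best_action = _q_online.argmax(dim=1)
    _q_target_a = _q_target[Bv, _best_action]
    # Bootstrapped one-step target; terminal transitions are zeroed out via _done.
    target = reward + self.config["discount_factor"] * (1.0 - _done) * _q_target_a
    td_error = qa - target
    loss = td_error ** 2  # per-transition squared TD error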



rlalgos/simple_ddqn/ddqn.py [266:337]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            self.evaluate()
        self.logger.update_csv()  # To save as a CSV file in logdir

        trajectories, n = self.train_batcher.get()  # To collect the last training trajectories before closing

        self.train_batcher.close()
        self.evaluation_batcher.get()  # To wait for the last trajectories
        self.evaluation_batcher.close()
        self.logger.close()

    def soft_update_params(self, net, target_net, tau):
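        # Polyak averaging: theta_target <- tau * theta + (1 - tau) * theta_target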
        for param, target_param in zip(net.parameters(), target_net.parameters()):
            target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)

    def evaluate(self, relaunch=True):
        # Non-blocking read: the evaluation batcher returns None while the
        # evaluation episodes are still running.
        evaluation_trajectories, n = self.evaluation_batcher.get(blocking=False)

        if evaluation_trajectories is None:
            return
        # Average evaluation reward: masked sum of rewards over time,
        # then mean over the evaluation episodes.
        avg_reward = (
            (
                evaluation_trajectories.trajectories["_observation/reward"]
                * evaluation_trajectories.trajectories.mask()
            )
            .sum(1)
            .mean()
            .item()
        )
        self.logger.add_scalar("avg_reward", avg_reward, self.iteration)
        if self.config["verbose"]:
            print(
                f"Iteration {self.iteration}, Reward = {avg_reward}, "
                f"Buffer size = {self.replay_buffer.size()}"
            )

        if relaunch:
            # Push the latest learning weights to the evaluation workers, then
            # restart evaluation episodes with epsilon = 0 (greedy evaluation).
            self.evaluation_batcher.update(
                self._state_dict(self.learning_model, torch.device("cpu"))
            )
            n_episodes = (
                self.config["n_evaluation_envs"] * self.config["n_evaluation_processes"]
            )
            self.evaluation_batcher.reset(
                agent_info=DictTensor({"epsilon": torch.zeros(n_episodes).float()})
            )
            self.evaluation_batcher.execute()
        return avg_reward

    def get_loss(self, transitions, device):
        transitions = transitions.to(device)
        B = transitions.n_elems()
        Bv = torch.arange(B).to(device)
        # Fields prefixed with "_" refer to the post-transition (next) observation.
        action = transitions["action/action"]
        reward = transitions["_observation/reward"]
        frame = transitions["observation/frame"]
        _frame = transitions["_observation/frame"]
        _done = transitions["_observation/done"].float()

        # Q-values of the actions actually taken, under the online (learning) network.
        q = self.learning_model(frame)
        qa = q[Bv, action]

        # Next-observation Q-values under the target network (gradients detached).
        # qp = self.learning_model(_frame).detach()
        _q_target = self.target_model(_frame).detach()
        _q_target_a = None
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
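
The two excerpts above are identical across rlalgos/dqn/duelling_dqn.py and rlalgos/simple_ddqn/ddqn.py. One possible way to remove the duplication, sketched below with hypothetical names (the base-class and trainer names are assumptions, not part of the repository), is to hoist the shared evaluation and target-update plumbing into a common base class that both trainers inherit from.

    # Hedged refactor sketch; class names are hypothetical.
    class _BaseDQNTrainer:
        def soft_update_params(self, net, target_net, tau):
            # Polyak averaging shared by both trainers.
            for param, target_param in zip(net.parameters(), target_net.parameters()):
                target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)

        def evaluate(self, relaunch=True):
            ...  # the shared evaluation logic above, defined once

    class DuellingDQNTrainer(_BaseDQNTrainer):
        ...  # keeps only the duelling-architecture specifics

    class SimpleDDQNTrainer(_BaseDQNTrainer):
        ...  # keeps only the simple double-DQN specifics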



