MTRF/algorithms/softlearning/algorithms/multi_sac.py [871:951]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if self._save_eval_paths:
            import pickle
            file_name = f'eval_paths_{iteration // self.epoch_length}.pkl'
            with open(os.path.join(os.getcwd(), file_name), 'wb') as f:
                pickle.dump(evaluation_paths_per_policy, f)

        if self._plotter:
            self._plotter.draw()

        return diagnostics

    @property
    def tf_saveables(self):
        saveables = {
            **{
                f'_policy_optimizer_{i}': policy_optimizer
                for i, policy_optimizer in enumerate(self._policy_optimizers)
            },
            **{
                f'_log_alphas_{i}': log_alpha
                for i, log_alpha in enumerate(self._log_alphas)
            },
        }

        Q_optimizer_saveables = [
            {
                f'Q_optimizer_{i}_{j}': Q_optimizer
                for j, Q_optimizer in enumerate(Q_optimizers)
            }
            for i, Q_optimizers in enumerate(self._Q_optimizers_per_policy)
        ]

        for Q_opt_dict in Q_optimizer_saveables:
            saveables.update(Q_opt_dict)

        if hasattr(self, '_alpha_optimizer'):
            saveables['_alpha_optimizer'] = self._alpha_optimizer

        return saveables

    def _initial_exploration_hook(self, env, initial_exploration_policy, goal_index):
        print("start random exploration")
        if self._n_initial_exploration_steps < 1:
            return

        if not initial_exploration_policy:
            raise ValueError(
                "Initial exploration policy must be provided when"
                " n_initial_exploration_steps > 0.")
        self._set_goal(goal_index)
        # env.set_goal(goal_index)

        self._samplers[goal_index].initialize(env, initial_exploration_policy, self._pools[goal_index])
        while self._pools[goal_index].size < self._n_initial_exploration_steps:
            self._samplers[goal_index].sample()

    def _init_training(self):
        # Hard-copy the target Q-function parameters for every goal before training.
        for i in range(self._num_goals):
            self._update_target(i, tau=1.0)

    def _initialize_samplers(self):
        for i, sampler in enumerate(self._samplers):
            sampler.initialize(self._training_environment, self._policies[i], self._pools[i])
            sampler.set_save_training_video_frequency(self._save_training_video_frequency)
            if hasattr(sampler, 'set_algorithm'):
                sampler.set_algorithm(self)
        self._n_episodes_elapsed = sum(
            self._samplers[i]._n_episodes for i in range(self._num_goals))

    @property
    def ready_to_train(self):
        return self._samplers[self._goal_index].batch_ready()

    def _do_sampling(self, timestep):
        self._sample_count += 1
        self._samplers[self._goal_index].sample()

    def _set_goal(self, goal_index):
        """ Set goal in env. """
        assert 0 <= goal_index < self._num_goals
        # print("setting goal to: ", goal_index, ", n_episodes_elapsed: ", self._n_episodes_elapsed)
        self._goal_index = goal_index
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
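
The tf_saveables property above flattens the per-goal policy optimizers, the per-goal log-alphas, and the nested per-goal/per-Q-function optimizers into a single checkpointable dict. A minimal sketch of the resulting key layout, using plain strings as stand-ins for the TF optimizer and variable objects (the names below are illustrative only):

policy_optimizers = ['adam_pi_0', 'adam_pi_1']
log_alphas = ['log_alpha_0', 'log_alpha_1']
Q_optimizers_per_policy = [['adam_Q_0_0', 'adam_Q_0_1'],
                           ['adam_Q_1_0', 'adam_Q_1_1']]

saveables = {
    **{f'_policy_optimizer_{i}': opt for i, opt in enumerate(policy_optimizers)},
    **{f'_log_alphas_{i}': alpha for i, alpha in enumerate(log_alphas)},
}
for i, Q_optimizers in enumerate(Q_optimizers_per_policy):
    saveables.update({
        f'Q_optimizer_{i}_{j}': opt for j, opt in enumerate(Q_optimizers)
    })

print(sorted(saveables))
# ['Q_optimizer_0_0', 'Q_optimizer_0_1', 'Q_optimizer_1_0', 'Q_optimizer_1_1',
#  '_log_alphas_0', '_log_alphas_1', '_policy_optimizer_0', '_policy_optimizer_1']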



MTRF/algorithms/softlearning/algorithms/phased_sac.py [601:682]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if self._save_eval_paths:
            import pickle
            file_name = f'eval_paths_{iteration // self.epoch_length}.pkl'
            with open(os.path.join(os.getcwd(), file_name), 'wb') as f:
                pickle.dump(evaluation_paths_per_policy, f)

        if self._plotter:
            self._plotter.draw()

        return diagnostics

    @property
    def tf_saveables(self):
        saveables = {
            **{
                f'_policy_optimizer_{i}': policy_optimizer
                for i, policy_optimizer in enumerate(self._policy_optimizers)
            },
            **{
                f'_log_alphas_{i}': log_alpha
                for i, log_alpha in enumerate(self._log_alphas)
            },
        }

        Q_optimizer_saveables = [
            {
                f'Q_optimizer_{i}_{j}': Q_optimizer
                for j, Q_optimizer in enumerate(Q_optimizers)
            }
            for i, Q_optimizers in enumerate(self._Q_optimizers_per_policy)
        ]

        for Q_opt_dict in Q_optimizer_saveables:
            saveables.update(Q_opt_dict)

        if hasattr(self, '_alpha_optimizer'):
            saveables['_alpha_optimizer'] = self._alpha_optimizer

        return saveables

    def _initial_exploration_hook(self, env, initial_exploration_policy, goal_index):
        print("start random exploration")
        if self._n_initial_exploration_steps < 1:
            return

        if not initial_exploration_policy:
            raise ValueError(
                "Initial exploration policy must be provided when"
                " n_initial_exploration_steps > 0.")
        self._set_goal(goal_index)
        # env.set_goal(goal_index)

        self._samplers[goal_index].initialize(env, initial_exploration_policy, self._pools[goal_index])
        while self._pools[goal_index].size < self._n_initial_exploration_steps:
            self._samplers[goal_index].sample()

    def _init_training(self):
        # Hard-copy the target Q-function parameters for every goal before training.
        for i in range(self._num_goals):
            self._update_target(i, tau=1.0)

    def _initialize_samplers(self):
        for i, sampler in enumerate(self._samplers):
            sampler.initialize(self._training_environment, self._policies[i], self._pools[i])
            sampler.set_save_training_video_frequency(self._save_training_video_frequency)
            if hasattr(sampler, 'set_algorithm'):
                sampler.set_algorithm(self)
        self._n_episodes_elapsed = sum(
            self._samplers[i]._n_episodes for i in range(self._num_goals))

    @property
    def ready_to_train(self):
        return self._samplers[self._goal_index].batch_ready()

    def _do_sampling(self, timestep):
        self._sample_count += 1
        self._samplers[self._goal_index].sample()

    def _set_goal(self, goal_index):
        """ Set goal in env. """
        # TODO: Change this to phases
        assert 0 <= goal_index < self._num_goals
        # print("setting goal to: ", goal_index, ", n_episodes_elapsed: ", self._n_episodes_elapsed)
        self._goal_index = goal_index
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
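
Both files share the same per-goal training flow: _initial_exploration_hook pre-fills each goal's replay pool with random-policy steps, _init_training hard-copies the target networks (tau=1.0), and _set_goal / _do_sampling route environment steps to the sampler of the currently active goal. A minimal, self-contained sketch of that flow with toy Sampler/Pool stand-ins rather than the actual softlearning classes (the thresholds and the round-robin goal schedule are illustrative assumptions, not taken from the repo):

class Pool:
    def __init__(self):
        self.size = 0

class Sampler:
    def initialize(self, env, policy, pool):
        self.env, self.policy, self.pool = env, policy, pool
    def sample(self):
        self.pool.size += 1          # one environment step added to the pool
    def batch_ready(self):
        return self.pool.size >= 8   # enough data for a gradient step

num_goals = 2
n_initial_exploration_steps = 16
pools = [Pool() for _ in range(num_goals)]
samplers = [Sampler() for _ in range(num_goals)]

# _initial_exploration_hook: fill each goal's pool with random-policy steps.
for goal_index in range(num_goals):
    samplers[goal_index].initialize(env=None, policy='random', pool=pools[goal_index])
    while pools[goal_index].size < n_initial_exploration_steps:
        samplers[goal_index].sample()

# _set_goal / _do_sampling: alternate goals and sample from the active sampler.
for timestep in range(10):
    goal_index = timestep % num_goals        # stand-in for the goal schedule
    samplers[goal_index].sample()
    if samplers[goal_index].batch_ready():
        pass  # a real run would take a gradient step here

print([pool.size for pool in pools])  # -> [21, 21]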



