MTRF/algorithms/softlearning/algorithms/multi_sac.py [432:635]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def _init_critic_updates(self):
        """Create minimization operation for critics' Q-functions.

        Creates a `tf.optimizer.minimize` operation for updating
        critic Q-function with gradient descent, and appends it to
        `self._training_ops` attribute.

        See Equations (5, 6) in [1], for further information of the
        Q-function update rule.
        """
        Q_targets = self._get_Q_targets()
        assert len(Q_targets) == len(self._policies)
        for Q_target in Q_targets:
            assert Q_target.shape.as_list() == [None, 1]

        self._Q_optimizers_per_policy = []
        self._Q_values_per_policy = []
        self._Q_losses_per_policy = []

        for i, Qs in enumerate(self._Qs_per_policy):
            Q_observations = {
                name: self._placeholders['observations'][name]
                for name in Qs[0].observation_keys
            }
            Q_inputs = flatten_input_structure({
                **Q_observations, 'actions': self._placeholders['actions']})

            Q_values = tuple(Q(Q_inputs) for Q in Qs)
            self._Q_values_per_policy.append(Q_values)

            Q_losses = tuple(
                tf.compat.v1.losses.mean_squared_error(
                    labels=Q_targets[i], predictions=Q_value, weights=0.5)
                for Q_value in Q_values)
            self._Q_losses_per_policy.append(Q_losses)

            # self._bellman_errors.append(tf.reduce_min(tuple(
            #     tf.math.squared_difference(Q_target, Q_value)
            #     for Q_value in Q_values), axis=0))

            Q_optimizers = tuple(
                tf.compat.v1.train.AdamOptimizer(
                    learning_rate=self._Q_lr,
                    name=f'{i}_{Q._name}_optimizer_{j}'
                ) for j, Q in enumerate(Qs))
            self._Q_optimizers_per_policy.append(Q_optimizers)

            Q_training_ops = tuple(
                Q_optimizer.minimize(loss=Q_loss, var_list=Q.trainable_variables)
                for Q, Q_loss, Q_optimizer in zip(Qs, Q_losses, Q_optimizers))

            self._training_ops_per_policy[i].update({f'Q_{i}': tf.group(Q_training_ops)})

    def _init_actor_updates(self):
        """Create minimization operations for policies and entropies.

        Creates a `tf.optimizer.minimize` operations for updating
        policy and entropy with gradient descent, and adds them to
        `self._training_ops` attribute.

        See Section 4.2 in [1], for further information of the policy update,
        and Section 5 in [1] for further information of the entropy update.
        """

        self._log_alphas = []
        self._alpha_optimizers = []
        self._alphas = []
        self._policy_optimizers = []
        self._policy_losses = []

        for i, policy in enumerate(self._policies):
            policy_inputs = flatten_input_structure({
                name: self._placeholders['observations'][name]
                for name in policy.observation_keys
            })
            actions = policy.actions(policy_inputs)
            log_pis = policy.log_pis(policy_inputs, actions)

            assert log_pis.shape.as_list() == [None, 1]

            log_alpha = tf.compat.v1.get_variable(
                f'log_alpha_{i}',
                dtype=tf.float32,
                initializer=0.0)
            alpha = tf.exp(log_alpha)
            self._log_alphas.append(log_alpha)
            self._alphas.append(alpha)

            if isinstance(self._target_entropy, Number):
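                # Temperature (alpha) loss: gradient steps on log_alpha drive the
                # policy's expected entropy, -E[log_pis], toward the target entropy.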
                alpha_loss = -tf.reduce_mean(
                    log_alpha * tf.stop_gradient(log_pis + self._target_entropy))

                alpha_optimizer = tf.compat.v1.train.AdamOptimizer(
                    self._policy_lr, name=f'alpha_optimizer_{i}')
                self._alpha_optimizers.append(alpha_optimizer)
                alpha_train_op = alpha_optimizer.minimize(loss=alpha_loss, var_list=[log_alpha])
                self._training_ops_per_policy[i].update({
                    f'temperature_alpha_{i}': alpha_train_op
                })

            if self._action_prior == 'normal':
                policy_prior = tfp.distributions.MultivariateNormalDiag(
                    loc=tf.zeros(self._action_shape),
                    scale_diag=tf.ones(self._action_shape))
                policy_prior_log_probs = policy_prior.log_prob(actions)
            elif self._action_prior == 'uniform':
                policy_prior_log_probs = 0.0
            else:
                raise ValueError(
                    f"Unknown action prior: {self._action_prior}")

            Q_observations = {
                name: self._placeholders['observations'][name]
                for name in self._Qs_per_policy[i][0].observation_keys
            }
            Q_inputs = flatten_input_structure({
                **Q_observations, 'actions': actions})
            Q_log_targets = tuple(Q(Q_inputs) for Q in self._Qs_per_policy[i])
            min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)
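            # Clipped double-Q estimate; the reparameterized policy loss below is
            # alpha * log pi(a|s) - min_j Q_j(s, a) - prior log-prob.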

            if self._reparameterize:
                policy_kl_losses = (
                    alpha * log_pis
                    - min_Q_log_target
                    - policy_prior_log_probs)
            else:
                raise NotImplementedError

            assert policy_kl_losses.shape.as_list() == [None, 1]

            self._policy_losses.append(policy_kl_losses)
            policy_loss = tf.reduce_mean(policy_kl_losses)

            policy_optimizer = tf.compat.v1.train.AdamOptimizer(
                learning_rate=self._policy_lr,
                name=f"policy_optimizer_{i}")

            self._policy_optimizers.append(policy_optimizer)

            policy_train_op = policy_optimizer.minimize(
                loss=policy_loss,
                var_list=policy.trainable_variables)

            self._training_ops_per_policy[i].update({f'policy_train_op_{i}': policy_train_op})

    def _init_rnd_updates(self):
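        """Create minimization operations for the RND predictors.

        The intrinsic reward signal for each goal is the squared error between
        a fixed, randomly initialized target network and a trained predictor
        (Random Network Distillation). The target output is wrapped in
        `tf.stop_gradient`, so only the predictor receives gradients.
        """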
        (self._rnd_errors,
         self._rnd_losses,
         self._rnd_error_stds,
         self._rnd_optimizers) = [], [], [], []
        for i in range(self._num_goals):
            self._placeholders['reward'].update({
                f'running_int_rew_std_{i}': tf.compat.v1.placeholder(
                    tf.float32, shape=(), name=f'running_int_rew_std_{i}')
            })
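            # Running std of the intrinsic reward, fed in by the training loop
            # and reported in the diagnostics below.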
            policy_inputs = flatten_input_structure({
                name: self._placeholders['observations'][name]
                for name in self._rnd_predictors[i].observation_keys
            })

            targets = tf.stop_gradient(self._rnd_targets[i](policy_inputs))
            predictions = self._rnd_predictors[i](policy_inputs)

            self._rnd_errors.append(tf.expand_dims(tf.reduce_mean(
                tf.math.squared_difference(targets, predictions), axis=-1), 1))
            self._rnd_losses.append(tf.reduce_mean(self._rnd_errors[i]))
            self._rnd_error_stds.append(tf.math.reduce_std(self._rnd_errors[i]))
            self._rnd_optimizers.append(tf.compat.v1.train.AdamOptimizer(
                learning_rate=self._rnd_lr,
                name=f"rnd_optimizer_{i}"))
            rnd_train_op = self._rnd_optimizers[i].minimize(
                loss=self._rnd_losses[i])
            self._training_ops_per_policy[i].update(
                {f'rnd_train_op_{i}': rnd_train_op}
            )

    def _init_diagnostics_ops(self):
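        """Collect per-goal diagnostics: Q-values, losses, alpha, and reward stats."""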
        diagnosables_per_goal = [
            OrderedDict((
                (f'Q_value_{i}', self._Q_values_per_policy[i]),
                (f'Q_loss_{i}', self._Q_losses_per_policy[i]),
                (f'policy_loss_{i}', self._policy_losses[i]),
                (f'alpha_{i}', self._alphas[i])
            ))
            for i in range(self._num_goals)
        ]

        for i in range(self._num_goals):
            # Only record the intrinsic/extrinsic reward diagnostics if
            # the reward is actually used (i.e. the reward coeff is not 0)
            if self._rnd_int_rew_coeffs[i]:
                diagnosables_per_goal[i][f'rnd_reward_{i}'] = self._int_rewards[i]
                diagnosables_per_goal[i][f'rnd_error_{i}'] = self._rnd_errors[i]
                diagnosables_per_goal[i][f'running_rnd_reward_std_{i}'] = (
                    self._placeholders['reward'][f'running_int_rew_std_{i}'])

            if self._ext_reward_coeffs[i]:
                diagnosables_per_goal[i][f'ext_reward_{i}'] = self._ext_rewards[i]
                diagnosables_per_goal[i][f'normalized_ext_reward_{i}'] = (
                    self._normalized_ext_rewards[i])
                diagnosables_per_goal[i][f'unnormalized_ext_reward_{i}'] = (
                    self._unscaled_ext_rewards[i])

            diagnosables_per_goal[i][f'running_ext_reward_std_{i}'] = (
                self._placeholders['reward'][f'running_ext_rew_std_{i}'])
            diagnosables_per_goal[i][f'total_reward_{i}'] = self._total_rewards[i]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
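
The Q losses above regress each critic toward the `Q_targets` returned by `_get_Q_targets()`, which lies outside this excerpt. For reference, a minimal sketch of the standard SAC soft Bellman backup such a method typically computes is shown below; the tensor names are illustrative assumptions, not identifiers from the repository.

# Illustrative sketch only (assumed names): standard SAC soft Bellman target.
import tensorflow as tf

def soft_bellman_target(reward, terminal, next_Q_values, next_log_pis,
                        alpha, discount=0.99):
    # Clipped double-Q over the target critics' outputs at the next state.
    min_next_Q = tf.reduce_min(next_Q_values, axis=0)
    # Soft state value: subtract the entropy temperature times the log-prob.
    next_value = min_next_Q - alpha * next_log_pis
    # No bootstrapping through terminal transitions; the target is held
    # constant with respect to the critic being trained.
    return tf.stop_gradient(
        reward + discount * (1.0 - terminal) * next_value)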



MTRF/algorithms/softlearning/algorithms/phased_sac.py [252:455]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def _init_critic_updates(self):
        """Create minimization operation for critics' Q-functions.

        Creates a `tf.optimizer.minimize` operation for updating
        critic Q-function with gradient descent, and appends it to
        `self._training_ops` attribute.

        See Equations (5, 6) in [1], for further information of the
        Q-function update rule.
        """
        Q_targets = self._get_Q_targets()
        assert len(Q_targets) == len(self._policies)
        for Q_target in Q_targets:
            assert Q_target.shape.as_list() == [None, 1]

        self._Q_optimizers_per_policy = []
        self._Q_values_per_policy = []
        self._Q_losses_per_policy = []

        for i, Qs in enumerate(self._Qs_per_policy):
            Q_observations = {
                name: self._placeholders['observations'][name]
                for name in Qs[0].observation_keys
            }
            Q_inputs = flatten_input_structure({
                **Q_observations, 'actions': self._placeholders['actions']})

            Q_values = tuple(Q(Q_inputs) for Q in Qs)
            self._Q_values_per_policy.append(Q_values)

            Q_losses = tuple(
                tf.compat.v1.losses.mean_squared_error(
                    labels=Q_targets[i], predictions=Q_value, weights=0.5)
                for Q_value in Q_values)
            self._Q_losses_per_policy.append(Q_losses)

            # self._bellman_errors.append(tf.reduce_min(tuple(
            #     tf.math.squared_difference(Q_target, Q_value)
            #     for Q_value in Q_values), axis=0))

            Q_optimizers = tuple(
                tf.compat.v1.train.AdamOptimizer(
                    learning_rate=self._Q_lr,
                    name=f'{i}_{Q._name}_optimizer_{j}'
                ) for j, Q in enumerate(Qs))
            self._Q_optimizers_per_policy.append(Q_optimizers)

            Q_training_ops = tuple(
                Q_optimizer.minimize(loss=Q_loss, var_list=Q.trainable_variables)
                for Q, Q_loss, Q_optimizer in zip(Qs, Q_losses, Q_optimizers))

            self._training_ops_per_policy[i].update({f'Q_{i}': tf.group(Q_training_ops)})

    def _init_actor_updates(self):
        """Create minimization operations for policies and entropies.

        Creates a `tf.optimizer.minimize` operations for updating
        policy and entropy with gradient descent, and adds them to
        `self._training_ops` attribute.

        See Section 4.2 in [1], for further information of the policy update,
        and Section 5 in [1] for further information of the entropy update.
        """

        self._log_alphas = []
        self._alpha_optimizers = []
        self._alphas = []
        self._policy_optimizers = []
        self._policy_losses = []

        for i, policy in enumerate(self._policies):
            policy_inputs = flatten_input_structure({
                name: self._placeholders['observations'][name]
                for name in policy.observation_keys
            })
            actions = policy.actions(policy_inputs)
            log_pis = policy.log_pis(policy_inputs, actions)

            assert log_pis.shape.as_list() == [None, 1]

            log_alpha = tf.compat.v1.get_variable(
                f'log_alpha_{i}',
                dtype=tf.float32,
                initializer=0.0)
            alpha = tf.exp(log_alpha)
            self._log_alphas.append(log_alpha)
            self._alphas.append(alpha)

            if isinstance(self._target_entropy, Number):
                alpha_loss = -tf.reduce_mean(
                    log_alpha * tf.stop_gradient(log_pis + self._target_entropy))

                alpha_optimizer = tf.compat.v1.train.AdamOptimizer(
                    self._policy_lr, name=f'alpha_optimizer_{i}')
                self._alpha_optimizers.append(alpha_optimizer)
                alpha_train_op = alpha_optimizer.minimize(loss=alpha_loss, var_list=[log_alpha])
                self._training_ops_per_policy[i].update({
                    f'temperature_alpha_{i}': alpha_train_op
                })

            if self._action_prior == 'normal':
                policy_prior = tfp.distributions.MultivariateNormalDiag(
                    loc=tf.zeros(self._action_shape),
                    scale_diag=tf.ones(self._action_shape))
                policy_prior_log_probs = policy_prior.log_prob(actions)
            elif self._action_prior == 'uniform':
                policy_prior_log_probs = 0.0
            else:
                raise ValueError(
                    f"Unknown action prior: {self._action_prior}")

            Q_observations = {
                name: self._placeholders['observations'][name]
                for name in self._Qs_per_policy[i][0].observation_keys
            }
            Q_inputs = flatten_input_structure({
                **Q_observations, 'actions': actions})
            Q_log_targets = tuple(Q(Q_inputs) for Q in self._Qs_per_policy[i])
            min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)

            if self._reparameterize:
                policy_kl_losses = (
                    alpha * log_pis
                    - min_Q_log_target
                    - policy_prior_log_probs)
            else:
                raise NotImplementedError

            assert policy_kl_losses.shape.as_list() == [None, 1]

            self._policy_losses.append(policy_kl_losses)
            policy_loss = tf.reduce_mean(policy_kl_losses)

            policy_optimizer = tf.compat.v1.train.AdamOptimizer(
                learning_rate=self._policy_lr,
                name=f"policy_optimizer_{i}")

            self._policy_optimizers.append(policy_optimizer)

            policy_train_op = policy_optimizer.minimize(
                loss=policy_loss,
                var_list=policy.trainable_variables)

            self._training_ops_per_policy[i].update({f'policy_train_op_{i}': policy_train_op})

    def _init_rnd_updates(self):
        (self._rnd_errors,
         self._rnd_losses,
         self._rnd_error_stds,
         self._rnd_optimizers) = [], [], [], []
        for i in range(self._num_goals):
            self._placeholders['reward'].update({
                f'running_int_rew_std_{i}': tf.compat.v1.placeholder(
                    tf.float32, shape=(), name=f'running_int_rew_std_{i}')
            })
            policy_inputs = flatten_input_structure({
                name: self._placeholders['observations'][name]
                for name in self._rnd_predictors[i].observation_keys
            })

            targets = tf.stop_gradient(self._rnd_targets[i](policy_inputs))
            predictions = self._rnd_predictors[i](policy_inputs)

            self._rnd_errors.append(tf.expand_dims(tf.reduce_mean(
                tf.math.squared_difference(targets, predictions), axis=-1), 1))
            self._rnd_losses.append(tf.reduce_mean(self._rnd_errors[i]))
            self._rnd_error_stds.append(tf.math.reduce_std(self._rnd_errors[i]))
            self._rnd_optimizers.append(tf.compat.v1.train.AdamOptimizer(
                learning_rate=self._rnd_lr,
                name=f"rnd_optimizer_{i}"))
            rnd_train_op = self._rnd_optimizers[i].minimize(
                loss=self._rnd_losses[i])
            self._training_ops_per_policy[i].update(
                {f'rnd_train_op_{i}': rnd_train_op}
            )

    def _init_diagnostics_ops(self):
        diagnosables_per_goal = [
            OrderedDict((
                (f'Q_value_{i}', self._Q_values_per_policy[i]),
                (f'Q_loss_{i}', self._Q_losses_per_policy[i]),
                (f'policy_loss_{i}', self._policy_losses[i]),
                (f'alpha_{i}', self._alphas[i])
            ))
            for i in range(self._num_goals)
        ]

        for i in range(self._num_goals):
            # Only record the intrinsic/extrinsic reward diagnostics if
            # the reward is actually used (i.e. the reward coeff is not 0)
            if self._rnd_int_rew_coeffs[i]:
                diagnosables_per_goal[i][f'rnd_reward_{i}'] = self._int_rewards[i]
                diagnosables_per_goal[i][f'rnd_error_{i}'] = self._rnd_errors[i]
                diagnosables_per_goal[i][f'running_rnd_reward_std_{i}'] = (
                    self._placeholders['reward'][f'running_int_rew_std_{i}'])

            if self._ext_reward_coeffs[i]:
                diagnosables_per_goal[i][f'ext_reward_{i}'] = self._ext_rewards[i]
                diagnosables_per_goal[i][f'normalized_ext_reward_{i}'] = (
                    self._normalized_ext_rewards[i])
                diagnosables_per_goal[i][f'unnormalized_ext_reward_{i}'] = (
                    self._unscaled_ext_rewards[i])

            diagnosables_per_goal[i][f'running_ext_reward_std_{i}'] = (
                self._placeholders['reward'][f'running_ext_rew_std_{i}'])
            diagnosables_per_goal[i][f'total_reward_{i}'] = self._total_rewards[i]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
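
Both files populate `self._training_ops_per_policy[i]` with one dictionary of ops per goal, each run as a single fetch. A minimal, self-contained sketch of that pattern, with illustrative stand-in losses rather than the algorithms' actual graphs, assuming the usual TF1 session-based training loop:

# Illustrative sketch only: one dict of training ops per goal, each executed
# with a single session.run call. Losses, shapes, and names are stand-ins.
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

num_goals = 2
observations = tf.compat.v1.placeholder(tf.float32, [None, 4], name='observations')
targets = tf.compat.v1.placeholder(tf.float32, [None, 1], name='targets')

training_ops_per_goal = []
for i in range(num_goals):
    weights = tf.compat.v1.get_variable(f'weights_{i}', shape=[4, 1])
    loss = tf.compat.v1.losses.mean_squared_error(
        labels=targets, predictions=tf.matmul(observations, weights), weights=0.5)
    optimizer = tf.compat.v1.train.AdamOptimizer(3e-4, name=f'optimizer_{i}')
    training_ops_per_goal.append({f'train_{i}': optimizer.minimize(loss)})

with tf.compat.v1.Session() as session:
    session.run(tf.compat.v1.global_variables_initializer())
    batch = {observations: np.random.randn(8, 4).astype(np.float32),
             targets: np.random.randn(8, 1).astype(np.float32)}
    # One update per goal; in the algorithms above each dict also bundles the
    # critic, temperature, and RND ops for that goal.
    for i in range(num_goals):
        session.run(training_ops_per_goal[i], feed_dict=batch)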



