tf_agents/agents/cql/cql_sac_agent.py [72:88]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
               actor_loss_weight: types.Float = 1.0,
               critic_loss_weight: types.Float = 0.5,
               alpha_loss_weight: types.Float = 1.0,
               actor_policy_ctor: Callable[
                   ..., tf_policy.TFPolicy] = actor_policy.ActorPolicy,
               critic_network_2: Optional[network.Network] = None,
               target_critic_network: Optional[network.Network] = None,
               target_critic_network_2: Optional[network.Network] = None,
               target_update_tau: types.Float = 1.0,
               target_update_period: types.Int = 1,
               td_errors_loss_fn: types.LossFn = tf.math.squared_difference,
               gamma: types.Float = 1.0,
               reward_scale_factor: types.Float = 1.0,
               initial_log_alpha: types.Float = 0.0,
               use_log_alpha_in_alpha_loss: bool = True,
               target_entropy: Optional[types.Float] = None,
               gradient_clipping: Optional[types.Float] = None,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



tf_agents/agents/sac/sac_agent.py [72:88]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
               actor_loss_weight: types.Float = 1.0,
               critic_loss_weight: types.Float = 0.5,
               alpha_loss_weight: types.Float = 1.0,
               actor_policy_ctor: Callable[
                   ..., tf_policy.TFPolicy] = actor_policy.ActorPolicy,
               critic_network_2: Optional[network.Network] = None,
               target_critic_network: Optional[network.Network] = None,
               target_critic_network_2: Optional[network.Network] = None,
               target_update_tau: types.Float = 1.0,
               target_update_period: types.Int = 1,
               td_errors_loss_fn: types.LossFn = tf.math.squared_difference,
               gamma: types.Float = 1.0,
               reward_scale_factor: types.Float = 1.0,
               initial_log_alpha: types.Float = 0.0,
               use_log_alpha_in_alpha_loss: bool = True,
               target_entropy: Optional[types.Float] = None,
               gradient_clipping: Optional[types.Float] = None,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
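Both excerpts are the shared tail of the `__init__` signatures of `CqlSacAgent` and `SacAgent`: the CQL agent repeats the SAC constructor arguments and their defaults verbatim. As a point of reference, below is a minimal sketch of constructing a `SacAgent` while overriding a few of the shared defaults listed above. The environment name (`Pendulum-v1`), layer sizes, and hyperparameter values (learning rates, `gamma`, `target_update_tau`) are illustrative assumptions, not taken from the excerpts.

# Sketch: building a SacAgent and overriding some of the shared
# constructor defaults shown in the excerpts above.
import tensorflow as tf
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import sac_agent, tanh_normal_projection_network
from tf_agents.environments import suite_gym, tf_py_environment
from tf_agents.networks import actor_distribution_network

# Illustrative continuous-control environment (assumed, not from the report).
env = tf_py_environment.TFPyEnvironment(suite_gym.load('Pendulum-v1'))
obs_spec, action_spec = env.observation_spec(), env.action_spec()

critic_net = critic_network.CriticNetwork(
    (obs_spec, action_spec), joint_fc_layer_params=(256, 256))
actor_net = actor_distribution_network.ActorDistributionNetwork(
    obs_spec,
    action_spec,
    fc_layer_params=(256, 256),
    continuous_projection_net=(
        tanh_normal_projection_network.TanhNormalProjectionNetwork))

agent = sac_agent.SacAgent(
    env.time_step_spec(),
    action_spec,
    critic_network=critic_net,
    actor_network=actor_net,
    actor_optimizer=tf.keras.optimizers.Adam(learning_rate=3e-4),
    critic_optimizer=tf.keras.optimizers.Adam(learning_rate=3e-4),
    alpha_optimizer=tf.keras.optimizers.Adam(learning_rate=3e-4),
    # Overrides of the shared defaults from the duplicated argument list.
    target_update_tau=0.005,
    target_update_period=1,
    td_errors_loss_fn=tf.math.squared_difference,
    gamma=0.99,
    reward_scale_factor=1.0,
    gradient_clipping=None)
agent.initialize()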



