in baselines/her/ddpg.py [0:0]
def _create_network(self, reuse=False):
    logger.info("Creating a DDPG agent with action space %d x %s..." % (self.dimu, self.max_u))
    self.sess = tf_util.get_session()

    # running averages
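    # (o_stats / g_stats keep running mean/std estimates used to normalize observations and goals)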
    with tf.variable_scope('o_stats') as vs:
        if reuse:
            vs.reuse_variables()
        self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)
    with tf.variable_scope('g_stats') as vs:
        if reuse:
            vs.reuse_variables()
        self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)

    # mini-batch sampling.
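    # tensors come from the staging area in the key order defined by self.stage_shapes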
    batch = self.staging_tf.get()
    batch_tf = OrderedDict([(key, batch[i])
                            for i, key in enumerate(self.stage_shapes.keys())])
    batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])

    # mask that selects only the demonstration samples, which occupy the last demo_batch_size entries of each batch
    mask = np.concatenate((np.zeros(self.batch_size - self.demo_batch_size), np.ones(self.demo_batch_size)), axis=0)

    # networks
    with tf.variable_scope('main') as vs:
        if reuse:
            vs.reuse_variables()
        self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)
        vs.reuse_variables()
    with tf.variable_scope('target') as vs:
        if reuse:
            vs.reuse_variables()
        target_batch_tf = batch_tf.copy()
        target_batch_tf['o'] = batch_tf['o_2']
        target_batch_tf['g'] = batch_tf['g_2']
        self.target = self.create_actor_critic(
            target_batch_tf, net_type='target', **self.__dict__)
        vs.reuse_variables()
    assert len(self._vars("main")) == len(self._vars("target"))

    # loss functions
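    # critic target: one-step Bellman backup through the target network, clipped to the feasible return range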
    target_Q_pi_tf = self.target.Q_pi_tf
    clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
    target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)
    self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))
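
    # actor loss: three variants depending on whether the behavior-cloning loss (bc_loss) and the Q-filter (q_filter) are enabled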
    if self.bc_loss == 1 and self.q_filter == 1:  # train with demonstrations, using both the bc loss and the Q-filter
        # keep only the demo samples on which the critic rates the demonstrator's action higher than the actor's action
        maskMain = tf.reshape(tf.boolean_mask(self.main.Q_tf > self.main.Q_pi_tf, mask), [-1])
        # cloning loss on the actor's actions, computed only on the samples selected by both masks above
        self.cloning_loss_tf = tf.reduce_sum(tf.square(
            tf.boolean_mask(tf.boolean_mask(self.main.pi_tf, mask), maskMain, axis=0) -
            tf.boolean_mask(tf.boolean_mask(batch_tf['u'], mask), maskMain, axis=0)))
        self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)  # primary loss scaled by its weight prm_loss_weight
        self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))  # L2 penalty on action values, scaled by the same weight
        self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf  # cloning loss added to the actor loss as an auxiliary loss, scaled by aux_loss_weight
    elif self.bc_loss == 1 and self.q_filter == 0:  # train with demonstrations, without the Q-filter
        self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask(self.main.pi_tf, mask) - tf.boolean_mask(batch_tf['u'], mask)))
        self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)
        self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
        self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf
    else:  # standard DDPG actor loss when not training with demonstrations
        self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
        self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
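
    # gradients are flattened into single vectors so MpiAdam can aggregate them across MPI workers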
    Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))
    pi_grads_tf = tf.gradients(self.pi_loss_tf, self._vars('main/pi'))
    assert len(self._vars('main/Q')) == len(Q_grads_tf)
    assert len(self._vars('main/pi')) == len(pi_grads_tf)
    self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))
    self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))
    self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))
    self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))

    # optimizers
    self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)
    self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)

    # polyak averaging
    self.main_vars = self._vars('main/Q') + self._vars('main/pi')
    self.target_vars = self._vars('target/Q') + self._vars('target/pi')
    self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')
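    # init copies the main network weights into the target network; update performs a polyak-averaged (soft) target update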
    self.init_target_net_op = list(
        map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))
    self.update_target_net_op = list(
        map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))

    # initialize all variables
    tf.variables_initializer(self._global_vars('')).run()
    self._sync_optimizers()
    self._init_target_net()