in gym/gym/envs/mujoco/humanoid-new.py [0:0]
def _step(self, a):
    self.timer += 1
    pos_before = mass_center(self.model)
    self.do_simulation(a, self.frame_skip)

    # qpos/qvel are (n, 1) column matrices under the old mujoco-py API, hence [:, 0].
    # The last qpos coordinate is pinned to 30 after every simulation step;
    # the re-assignment under realgoal == 1 is redundant but preserved.
    iq = np.copy(self.model.data.qpos)[:, 0]
    iv = np.copy(self.model.data.qvel)[:, 0]
    iq[-1] = 30
    if self.realgoal == 1:
        iq[-1] = 30
    self.set_state(iq, iv)
    if self.realgoal == 0:
        # Goal 0: the alive bonus and forward-velocity term are granted only
        # while the torso height stays at or below 0.5; the episode ends once
        # it rises above 0.5, except during the first 50 steps.
        pos_after = mass_center(self.model)
        alive_bonus = 5.0
        data = self.model.data
        lin_vel_cost = 0.25 * (pos_after - pos_before) / self.model.opt.timestep
        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
        quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
        quad_impact_cost = min(quad_impact_cost, 10)
        reward = -quad_ctrl_cost - quad_impact_cost
        qpos = self.model.data.qpos
        if not bool(qpos[2] > 0.5):
            reward += alive_bonus + lin_vel_cost
        done = bool(qpos[2] > 0.5)
        if self.timer < 50:
            done = False
    elif self.realgoal == 1:
        # Goal 1: the alive bonus and forward-velocity term are granted only
        # while the torso height stays at or above 1.0; the episode never
        # terminates (done is unconditionally overridden below).
        pos_after = mass_center(self.model)
        alive_bonus = 5.0
        data = self.model.data
        lin_vel_cost = 0.25 * (pos_after - pos_before) / self.model.opt.timestep
        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
        quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
        quad_impact_cost = min(quad_impact_cost, 10)
        reward = -quad_ctrl_cost - quad_impact_cost
        qpos = self.model.data.qpos
        if not bool(qpos[2] < 1.0):
            reward += alive_bonus + lin_vel_cost
        done = bool(qpos[2] < 1.0)
        done = False  # never terminate for this goal
    elif self.realgoal == 2:
        # Goal 2: same height condition as goal 1, but the velocity term is
        # subtracted, so moving backwards is rewarded; the episode never
        # terminates.
        pos_after = mass_center(self.model)
        alive_bonus = 5.0
        data = self.model.data
        lin_vel_cost = 0.25 * (pos_after - pos_before) / self.model.opt.timestep
        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
        quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
        quad_impact_cost = min(quad_impact_cost, 10)
        reward = -quad_ctrl_cost - quad_impact_cost
        qpos = self.model.data.qpos
        if not bool(qpos[2] < 1.0):
            reward += alive_bonus - lin_vel_cost
        done = bool(qpos[2] < 1.0)
        done = False  # never terminate for this goal
    # Earlier variant of the reward that credited torso height directly (left disabled):
    # pos_after = mass_center(self.model)
    # alive_bonus = 5.0
    # data = self.model.data
    # lin_vel_cost = 0.25 * (pos_after - pos_before) / self.model.opt.timestep
    # quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
    # quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
    # quad_impact_cost = min(quad_impact_cost, 10)
    # reward = 0 - quad_ctrl_cost - quad_impact_cost
    # qpos = self.model.data.qpos
    # if not bool((qpos[2] < 1.0)):
    #     reward += self.model.data.qpos[2]
    # done = bool((qpos[2] < 1.0))
    # print(qpos[2])
    # done = False
    return self._get_obs(), reward, done, {}
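
# A minimal standalone sketch (not part of the file) of the reward structure the
# three realgoal branches share: identical control and contact-force penalties,
# differing only in the torso-height condition and the sign of the forward-velocity
# term. The name goal_reward and its signature are hypothetical; it assumes numpy is
# imported as np, as elsewhere in the file, and episode termination is handled
# separately in _step.
def goal_reward(goal, torso_height, lin_vel_cost, ctrl, cfrc_ext, alive_bonus=5.0):
    quad_ctrl_cost = 0.1 * np.square(ctrl).sum()                    # action penalty
    quad_impact_cost = min(0.5e-6 * np.square(cfrc_ext).sum(), 10)  # capped contact penalty
    reward = -quad_ctrl_cost - quad_impact_cost
    if goal == 0 and torso_height <= 0.5:
        reward += alive_bonus + lin_vel_cost   # stay low, move forward
    elif goal == 1 and torso_height >= 1.0:
        reward += alive_bonus + lin_vel_cost   # stay upright, move forward
    elif goal == 2 and torso_height >= 1.0:
        reward += alive_bonus - lin_vel_cost   # stay upright, move backward
    return reward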