def _step()

in gym/gym/envs/mujoco/humanoid_seq.py
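
_step advances the simulation by one control step. Every 250 steps it switches
the active sub-goal according to self.realgoal, then computes a shaped reward
for the current sub-task: crawling (goal 0) rewards lowering the torso below a
height of 1.0, while walking (goal 1) rewards keeping it above 1.0; once the
target posture is reached, both add an alive bonus plus a forward-velocity
term. Note that both branches assume self.realgoal contains only the values 0
and 1; any other value would leave reward and done unbound at the return.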


    def _step(self, a):
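        # Every 250 steps, advance to the next goal in the sequence
        # self.realgoal (0 = crawling, 1 = walking).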
        if self.count % 250 == 0:
            self.current = self.realgoal[self.count // 250]

        self.count += 1
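        # Measure center-of-mass x-position and torso height before and after
        # the physics step; the deltas feed the velocity-based reward terms.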
        pos_before = mass_center(self.model)
        height_before = self.model.data.qpos[2][0]
        self.do_simulation(a, self.frame_skip)
        height_after = self.model.data.qpos[2][0]


        # Overwrite the last qpos coordinate with a fixed value. This slot
        # appears to act as a model-specific goal/marker coordinate; set_state()
        # then writes the modified state back into the simulator.
        iq = np.copy(self.model.data.qpos)[:, 0]
        iv = np.copy(self.model.data.qvel)[:, 0]
        iq[-1] = 30
        self.set_state(iq, iv)

        if self.current == 0:  # crawling
            pos_after = mass_center(self.model)
            alive_bonus = 5.0
            data = self.model.data
            # Forward-progress term (kept with its "cost" name from the stock Humanoid env).
            lin_vel_cost = 0.25 * (pos_after - pos_before) / self.model.opt.timestep
            quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
            quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
            quad_impact_cost = min(quad_impact_cost, 10)

            reward = -quad_ctrl_cost - quad_impact_cost

            qpos = self.model.data.qpos
            if bool(qpos[2] > 1.0):
                # Torso still high: reward downward velocity to encourage crawling.
                reward += (height_before - height_after) / self.model.opt.timestep
            else:
                # Torso is low: reward staying alive and moving forward.
                reward += alive_bonus + lin_vel_cost

            done = False
        elif self.current == 1:  # walking
            pos_after = mass_center(self.model)
            alive_bonus = 5.0
            data = self.model.data
            lin_vel_cost = 0.25 * (pos_after - pos_before) / self.model.opt.timestep
            quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
            quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
            quad_impact_cost = min(quad_impact_cost, 10)

            reward = -quad_ctrl_cost - quad_impact_cost

            qpos = self.model.data.qpos
            if bool(qpos[2] < 1.0):
                # Torso too low: reward upward velocity to get back upright.
                reward += (height_after - height_before) / self.model.opt.timestep
            else:
                # Upright: reward staying alive and moving forward.
                reward += alive_bonus + lin_vel_cost

            done = False

        return self._get_obs(), reward, done, {}
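
For context, here is a minimal rollout sketch against the old gym API that this
method targets (step returning obs, reward, done, info). The environment id
"HumanoidSeq-v0" is a hypothetical placeholder for illustration; the actual
registration name for humanoid_seq.py is not shown in this file.

    import gym

    # "HumanoidSeq-v0" is a hypothetical id, used here for illustration only.
    env = gym.make("HumanoidSeq-v0")
    obs = env.reset()
    for t in range(1000):
        action = env.action_space.sample()          # random policy, just to drive the loop
        obs, reward, done, info = env.step(action)  # dispatches to _step(a) above
        if done:
            obs = env.reset()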