in Experiments/Visualizers.py [0:0]
def set_joint_pose_return_image(self, joint_pose, arm='both', gripper=False):
    """Set the simulated robot to `joint_pose`, optionally command the gripper(s), and return a rendered image.

    Parameters
    ----------
    joint_pose : array-like
        For arm='right' or arm='left': 8 DoF — 7 arm joint angles followed by
        1 gripper value in [0, 1] (close -> open).
        For arm='both': 16 DoF ordered <left arm (7), right arm (7),
        left gripper, right gripper> — the plan-generation order, which is the
        reverse of MuJoCo's right-arm-first layout and is swapped below.
    arm : str
        One of 'right', 'left', 'both'.
    gripper : bool
        If True, also derive gripper action(s) from the trailing entries of
        `joint_pose`; otherwise a zero action is stepped.

    Returns
    -------
    numpy.ndarray
        600x600 rendering from camera 'vizview1', flipped vertically.

    Raises
    ------
    ValueError
        If `arm` is not one of 'right', 'left', 'both'. (Previously an
        unknown value silently rendered without modifying any joints.)
    """
    if arm not in ('right', 'left', 'both'):
        raise ValueError("arm must be 'right', 'left', or 'both', got {!r}".format(arm))

    self.update_state()
    self.state = copy.deepcopy(self.full_state['joint_pos'])
    # In the MuJoCo state layout, the FIRST 7 joint angles are the RIGHT arm
    # and the LAST 7 are the LEFT arm.
    if arm == 'right':
        # joint_pose is 8 DoF: 7 arm joints + 1 gripper.
        self.state[:7] = copy.deepcopy(joint_pose[:7])
    elif arm == 'left':
        # joint_pose is 8 DoF: 7 arm joints + 1 gripper.
        self.state[7:] = copy.deepcopy(joint_pose[:7])
    else:  # arm == 'both'
        # Plans store <left arm, right arm, left gripper, right gripper>, so
        # the two arm halves must be swapped to match MuJoCo's ordering.
        self.state[:7] = joint_pose[7:14]   # right arm
        self.state[7:] = joint_pose[:7]     # left arm

    # Set the joint angles directly (teleport-style, bypassing control).
    self.environment.set_robot_joint_positions(self.state)

    action = np.zeros(16)
    if gripper:
        # Action indices: 14 = right gripper, 15 = left gripper.
        # MIME gripper values are 0..100 (close -> open) but are passed here
        # rescaled to [0, 1]; MuJoCo expects [-1 (open), 1 (close)], hence
        # the affine map -x*2 + 1.
        if arm == 'right':
            action[14] = -joint_pose[-1]*2 + 1
        elif arm == 'left':
            action[15] = -joint_pose[-1]*2 + 1
        else:  # arm == 'both'
            action[14] = -joint_pose[15]*2 + 1
            action[15] = -joint_pose[14]*2 + 1

    # Step once (even with a zero action) so gripper positions are applied
    # before rendering — matches the original unconditional step.
    self.environment.step(action)

    # Render and flip vertically (the renderer's row order is inverted
    # relative to the desired image orientation).
    image = np.flipud(self.environment.sim.render(600, 600, camera_name='vizview1'))
    return image