in soundspaces/simulator.py [0:0]
def step(self, action, only_allowed=True):
"""
All angle calculations in this function are w.r.t. the habitat coordinate frame, on the X-Z plane,
where +Y is upward, -Z is forward and +X is rightward.
Angle 0 corresponds to +X, angle 90 corresponds to +Z, and angle -90 corresponds to 270.
:param action: action to be taken
:param only_allowed: if true, then can't step anywhere except allowed locations
:return:
Dict of observations
"""
assert self._is_episode_active, (
"episode is not active, environment not RESET or "
"STOP action called previously"
)
self._previous_step_collided = False
# STOP: 0, FORWARD: 1, LEFT: 2, RIGHT: 3
if action == HabitatSimActions.STOP:
self._is_episode_active = False
else:
prev_position_index = self._receiver_position_index
prev_rotation_angle = self._rotation_angle
if action == HabitatSimActions.MOVE_FORWARD:
# the agent initially faces -Z by default
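# assume a collision; the flag is cleared below if a reachable neighbor lies in the facing direction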
self._previous_step_collided = True
for neighbor in self.graph[self._receiver_position_index]:
p1 = self.graph.nodes[self._receiver_position_index]['point']
p2 = self.graph.nodes[neighbor]['point']
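# heading from the current node to this neighbor, measured on the X-Z plane via
# arctan2(dz, dx), rounded to the nearest degree and wrapped to [0, 360);
# e.g. an offset of (+1, 0, 0) gives 0 and (0, 0, +1) gives 90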
direction = int(np.around(np.rad2deg(np.arctan2(p2[2] - p1[2], p2[0] - p1[0])))) % 360
if direction == self.get_orientation():
self._receiver_position_index = neighbor
self._previous_step_collided = False
break
elif action == HabitatSimActions.TURN_LEFT:
# agent rotates counterclockwise, so turning left means increasing rotation angle by 90
self._rotation_angle = (self._rotation_angle + 90) % 360
elif action == HabitatSimActions.TURN_RIGHT:
self._rotation_angle = (self._rotation_angle - 90) % 360
if self.config.CONTINUOUS_VIEW_CHANGE:
intermediate_observations = list()
fps = self.config.VIEW_CHANGE_FPS
if action == HabitatSimActions.MOVE_FORWARD:
prev_position = np.array(self.graph.nodes[prev_position_index]['point'])
current_position = np.array(self.graph.nodes[self._receiver_position_index]['point'])
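# linearly interpolate the position to render fps - 1 intermediate frames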
for i in range(1, fps):
intermediate_position = prev_position + i / fps * (current_position - prev_position)
self.set_agent_state(intermediate_position.tolist(), quat_from_angle_axis(np.deg2rad(
self._rotation_angle), np.array([0, 1, 0])))
sim_obs = self._sim.get_sensor_observations()
observations = self._sensor_suite.get_observations(sim_obs)
intermediate_observations.append(observations)
else:
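# for turn actions, interpolate the rotation angle while keeping the position fixed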
for i in range(1, fps):
if action == HabitatSimActions.TURN_LEFT:
intermediate_rotation = prev_rotation_angle + i / fps * 90
elif action == HabitatSimActions.TURN_RIGHT:
intermediate_rotation = prev_rotation_angle - i / fps * 90
self.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
quat_from_angle_axis(np.deg2rad(intermediate_rotation),
np.array([0, 1, 0])))
sim_obs = self._sim.get_sensor_observations()
observations = self._sensor_suite.get_observations(sim_obs)
intermediate_observations.append(observations)
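# place the agent at its final pose for this step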
self.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
quat_from_angle_axis(np.deg2rad(self._rotation_angle), np.array([0, 1, 0])))
self._episode_step_count += 1
# log debugging info
logging.debug('After taking action {}, s,r: {}, {}, orientation: {}, location: {}'.format(
action, self._source_position_index, self._receiver_position_index,
self.get_orientation(), self.graph.nodes[self._receiver_position_index]['point']))
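# fetch the raw observation for the new pose; when pre-rendered observations are used,
# also store it in the simulator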
sim_obs = self._get_sim_observation()
if self.config.USE_RENDERED_OBSERVATIONS:
self._sim.set_sensor_observations(sim_obs)
self._prev_sim_obs = sim_obs
observations = self._sensor_suite.get_observations(sim_obs)
if self.config.CONTINUOUS_VIEW_CHANGE:
observations['intermediate'] = intermediate_observations
return observations
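
A minimal usage sketch (assumptions: `sim` is an already-constructed SoundSpaces simulator whose episode has been reset; the action values come from habitat's HabitatSimActions registry):

for action in (HabitatSimActions.MOVE_FORWARD, HabitatSimActions.TURN_LEFT, HabitatSimActions.STOP):
    observations = sim.step(action)
    # 'intermediate' is only present when CONTINUOUS_VIEW_CHANGE is enabled
    intermediate_frames = observations.get('intermediate', [])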