exploring_exploration/models/navigation.py [517:585]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def is_feasible_path(self, occupancy_map, path):
        """Return True when no pixel along the path is marked as blocked.

        Inputs:
            occupancy_map - (H, W, 3) RGB image; pure blue pixels
                            ([0, 0, 255]) denote blocked cells
            path          - [[x0, x1, ...], [y0, y1, ...]] pixel coordinates
        """
        xs, ys = path[0], path[1]
        # Sample the map at every path coordinate (note: row = y, col = x).
        sampled = occupancy_map[ys, xs]
        blocked_color = np.array([0, 0, 255])
        hits_obstacle = np.all(sampled == blocked_color, axis=-1)
        return not np.any(hits_obstacle)

    def update_target(self, prev_delta):
        """Re-express the current low-level target after moving by prev_delta."""
        new_target = self.ego_transform(self.low_level_target, prev_delta)
        self.low_level_target = new_target

    def ego_transform(self, target, prev_delta):
        """Re-express ``target`` in the agent's new egocentric frame.

        Inputs:
            target     - (x, y) position in image coordinates
            prev_delta - (dx, dy, dtheta) motion in the previous position's
                         frame of reference
        Returns:
            (x, y) integer image coordinates, clipped one pixel inside the
            map border.
        """
        # Egocentric convention: X points forward, Y points rightward.
        ego_x, ego_y = self._convert_to_egocentric(target)
        dx, dy, dtheta = prev_delta
        # Undo the translation ...
        tx = ego_x - dx
        ty = ego_y - dy
        # ... then undo the rotation (rotate by -dtheta).
        cos_t = math.cos(-dtheta)
        sin_t = math.sin(-dtheta)
        rx = cos_t * tx - sin_t * ty
        ry = sin_t * tx + cos_t * ty
        # Map back to image coordinates and keep strictly inside the map.
        img_x, img_y = self._convert_to_image((rx, ry))
        img_x = np.clip(img_x, 1, self.map_size - 2)
        img_y = np.clip(img_y, 1, self.map_size - 2)
        return (int(img_x), int(img_y))

    def _convert_to_egocentric(self, coords):
        return (-coords[1] + self.map_size / 2, coords[0] - self.map_size / 2)

    def _convert_to_image(self, coords):
        # Forward - positive X, rightward - positive Y
        return (coords[1] + self.map_size / 2, -coords[0] + self.map_size / 2)

    def reset(self):
        """Reset both actors, re-seed the RNG, and clear all episode state."""
        self.high_level_actor.reset()
        self.low_level_actor.reset()
        # Fixed seed keeps stochastic decisions reproducible across episodes.
        self._rng = random.Random(123)
        for attr in (
            "high_level_target",
            "low_level_target",
            "map_size",
            "planned_path",
            "intermediate_goal_idx",
        ):
            setattr(self, attr, None)

    def _caught_in_rotation(self):
        if len(self.past_actions) > 1:
            l = self.action_space["left"]
            r = self.action_space["right"]
            if (self.past_actions[-2] == l and self.past_actions[-1] == r) or (
                self.past_actions[-2] == r and self.past_actions[-1] == l
            ):
                return True
        return False

    @property
    def planning_failure_flag(self):
        """Whether the high-level planner reported a planning failure."""
        planner = self.high_level_actor
        return planner.planning_failure_flag

    @property
    def planning_visualization(self):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



exploring_exploration/models/navigation.py [780:852]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def is_feasible_path(self, occupancy_map, path):
        """Return True when no pixel along the path is marked as blocked.

        Inputs:
            occupancy_map - (H, W, 3) RGB image; pure blue pixels
                            ([0, 0, 255]) denote blocked cells
            path          - [[x0, x1, ...], [y0, y1, ...]] pixel coordinates
        """
        xs, ys = path[0], path[1]
        # Sample the map at every path coordinate (note: row = y, col = x).
        sampled = occupancy_map[ys, xs]
        blocked_color = np.array([0, 0, 255])
        hits_obstacle = np.all(sampled == blocked_color, axis=-1)
        return not np.any(hits_obstacle)

    def update_target(self, prev_delta):
        """Re-express the current low-level target after moving by prev_delta."""
        new_target = self.ego_transform(self.low_level_target, prev_delta)
        self.low_level_target = new_target

    def ego_transform(self, target, prev_delta):
        """Re-express ``target`` in the agent's new egocentric frame.

        Inputs:
            target     - (x, y) position in image coordinates
            prev_delta - (dx, dy, dtheta) motion in the previous position's
                         frame of reference
        Returns:
            (x, y) integer image coordinates, clipped one pixel inside the
            map border.
        """
        # Egocentric convention: X points forward, Y points rightward.
        ego_x, ego_y = self._convert_to_egocentric(target)
        dx, dy, dtheta = prev_delta
        # Undo the translation ...
        tx = ego_x - dx
        ty = ego_y - dy
        # ... then undo the rotation (rotate by -dtheta).
        cos_t = math.cos(-dtheta)
        sin_t = math.sin(-dtheta)
        rx = cos_t * tx - sin_t * ty
        ry = sin_t * tx + cos_t * ty
        # Map back to image coordinates and keep strictly inside the map.
        img_x, img_y = self._convert_to_image((rx, ry))
        img_x = np.clip(img_x, 1, self.map_size - 2)
        img_y = np.clip(img_y, 1, self.map_size - 2)
        return (int(img_x), int(img_y))

    def _convert_to_egocentric(self, coords):
        return (-coords[1] + self.map_size / 2, coords[0] - self.map_size / 2)

    def _convert_to_image(self, coords):
        # Forward - positive X, rightward - positive Y
        return (coords[1] + self.map_size / 2, -coords[0] + self.map_size / 2)

    def reset(self):
        """Reset both actors, re-seed the RNG, and clear all episode state."""
        self.high_level_actor.reset()
        self.low_level_actor.reset()
        # Fixed seed keeps stochastic decisions reproducible across episodes.
        self._rng = random.Random(123)
        for attr in (
            "high_level_target",
            "low_level_target",
            "map_size",
            "planned_path",
            "intermediate_goal_idx",
        ):
            setattr(self, attr, None)

    def _caught_in_rotation(self):
        if len(self.past_actions) > 1:
            l = self.action_space["left"]
            r = self.action_space["right"]
            if (self.past_actions[-2] == l and self.past_actions[-1] == r) or (
                self.past_actions[-2] == r and self.past_actions[-1] == l
            ):
                return True
        return False

    @property
    def planning_failure_flag(self):
        """Whether the high-level planner reported a planning failure."""
        planner = self.high_level_actor
        return planner.planning_failure_flag

    @property
    def planning_visualization(self):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



