exploring_exploration/utils/eval.py [667:787]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        obs_im, obs_sm, obs_lm = get_obs(obs)
        obs_collns = obs["collisions"]
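        # The frontier agent plans over an occupancy map using egocentric pose deltas.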
        if actor_type == "frontier":
            delta_ego = torch.zeros((num_processes, 3)).to(device)
            frontier_agent.reset()

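        # Recurrent exploration policies carry hidden states and episode masks.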
        if use_policy:
            recurrent_hidden_states = torch.zeros(num_processes, feat_shape_sim[0]).to(
                device
            )
            masks = torch.zeros(num_processes, 1).to(device)

        nav_policy.reset()

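        # Track the previous action / collision (optional policy inputs) and accumulated odometry.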
        prev_action = torch.zeros(num_processes, 1).long().to(device)
        prev_collision = obs_collns
        obs_odometer = torch.zeros(num_processes, 4).to(device)
        per_proc_collisions = [0.0 for _ in range(num_processes)]

        # =================================================================
        # ==================== Perform exploration ========================
        # =================================================================
        for step in range(num_steps_exp):
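            # Select the next exploration action according to the actor type.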
            if use_policy:
                encoder_inputs = [obs_im]
                if encoder_type == "rgb+map":
                    encoder_inputs += [obs_sm, obs_lm]
                with torch.no_grad():
                    policy_feats = encoder(*encoder_inputs)
                    policy_inputs = {"features": policy_feats}
                    if use_action_embedding:
                        policy_inputs["actions"] = prev_action
                    if use_collision_embedding:
                        policy_inputs["collisions"] = prev_collision.long()

                    policy_outputs = actor_critic.act(
                        policy_inputs,
                        recurrent_hidden_states,
                        masks,
                        deterministic=False,
                    )
                    _, action, _, recurrent_hidden_states = policy_outputs
            elif actor_type == "oracle":
                action = obs["oracle_action"].long()
            elif actor_type == "random":
                action = torch.randint(
                    0, envs.action_space.n, (num_processes, 1)
                ).long()
            elif actor_type == "forward":
                action = torch.Tensor(np.ones((num_processes, 1)) * forward_action_id)
                action = action.long()
            elif actor_type == "forward-plus":
                action = torch.Tensor(np.ones((num_processes, 1)) * forward_action_id)
                collision_mask = prev_collision > 0
                action[collision_mask] = turn_action_id
                action = action.long()
            elif actor_type == "frontier":
                # This assumes that num_processes = 1
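                assert num_processes == 1, "frontier agent assumes a single process"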
                occ_map = obs["highres_coarse_occupancy"][0].cpu().numpy()
                occ_map = occ_map.transpose(1, 2, 0)
                occ_map = np.ascontiguousarray(occ_map)
                occ_map = occ_map.astype(np.uint8)
                action = frontier_agent.act(
                    occ_map, delta_ego[0].cpu().numpy(), prev_collision[0].item()
                )
                action = torch.Tensor([[action]]).long()

            obs, reward, done, infos = envs.step(action)
            # Process the updated observations from the environment
            obs_im, obs_sm, obs_lm = get_obs(obs)
            obs_collns = obs["collisions"]

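            # Accumulate odometry; the frontier agent consumes per-step egocentric deltas.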
            obs_odometer_curr = process_odometer(obs["delta"])
            if actor_type == "frontier":
                delta_ego = compute_egocentric_coors(
                    obs_odometer_curr, obs_odometer, occ_map_scale,
                )  # (N, 3) --- (dx_ego, dy_ego, dt_ego)

            # Masks stay 1 throughout: the episode never terminates during exploration.
            masks = torch.ones(num_processes, 1, device=device)
            obs_odometer = obs_odometer + obs_odometer_curr

            # The episode must not terminate during the exploration phase.
            assert not done[0]

            # Update collisions metric
            for pr in range(num_processes):
                per_proc_collisions[pr] += obs_collns[pr, 0].item()

            prev_collision = obs_collns
            prev_action = action

            # Sanity checks: exploration must finish exactly on the final step.
            if step == num_steps_exp - 1:
                assert infos[0]["finished_exploration"]
            else:
                assert not infos[0]["finished_exploration"]
                exploration_topdown_map = infos[0]["topdown_map"]
                exploration_topdown_map = infos[0]["topdown_map"]
        # Record exploration statistics
        for pr in range(num_processes):
            episode_environment_statistics.append(infos[pr]["environment_statistics"])
            exp_area_covered.append(infos[pr]["seen_area"])
            exp_collisions.append(per_proc_collisions[pr])

        # =================================================================
        # ===================== Navigation evaluation =====================
        # =================================================================
        # gather statistics for visualization
        per_proc_rgb = [[] for _ in range(num_processes)]
        per_proc_depth = [[] for _ in range(num_processes)]
        per_proc_fine_occ = [[] for _ in range(num_processes)]
        per_proc_coarse_occ = [[] for _ in range(num_processes)]
        per_proc_topdown_map = [[] for _ in range(num_processes)]
        per_proc_planner_vis = [[] for _ in range(num_processes)]
        per_proc_gt_topdown_map = [[] for _ in range(num_processes)]
        per_proc_initial_planner_vis = [[] for _ in range(num_processes)]
        per_proc_exploration_topdown_map = [[] for _ in range(num_processes)]

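        # Output resolution (in pixels) for the visualization frames.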
        WIDTH, HEIGHT = 300, 300

        nav_policy.reset()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



exploring_exploration/utils/eval.py [1012:1132]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        obs_im, obs_sm, obs_lm = get_obs(obs)
        obs_collns = obs["collisions"]
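        # The frontier agent plans over an occupancy map using egocentric pose deltas.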
        if actor_type == "frontier":
            delta_ego = torch.zeros((num_processes, 3)).to(device)
            frontier_agent.reset()

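        # Recurrent exploration policies carry hidden states and episode masks.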
        if use_policy:
            recurrent_hidden_states = torch.zeros(num_processes, feat_shape_sim[0]).to(
                device
            )
            masks = torch.zeros(num_processes, 1).to(device)

        nav_policy.reset()

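        # Track the previous action / collision (optional policy inputs) and accumulated odometry.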
        prev_action = torch.zeros(num_processes, 1).long().to(device)
        prev_collision = obs_collns
        obs_odometer = torch.zeros(num_processes, 4).to(device)
        per_proc_collisions = [0.0 for _ in range(num_processes)]

        # =================================================================
        # ==================== Perform exploration ========================
        # =================================================================
        for step in range(num_steps_exp):
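            # Select the next exploration action according to the actor type.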
            if use_policy:
                encoder_inputs = [obs_im]
                if encoder_type == "rgb+map":
                    encoder_inputs += [obs_sm, obs_lm]
                with torch.no_grad():
                    policy_feats = encoder(*encoder_inputs)
                    policy_inputs = {"features": policy_feats}
                    if use_action_embedding:
                        policy_inputs["actions"] = prev_action
                    if use_collision_embedding:
                        policy_inputs["collisions"] = prev_collision.long()

                    policy_outputs = actor_critic.act(
                        policy_inputs,
                        recurrent_hidden_states,
                        masks,
                        deterministic=False,
                    )
                    _, action, _, recurrent_hidden_states = policy_outputs
            elif actor_type == "oracle":
                action = obs["oracle_action"].long()
            elif actor_type == "random":
                action = torch.randint(
                    0, envs.action_space.n, (num_processes, 1)
                ).long()
            elif actor_type == "forward":
                action = torch.Tensor(np.ones((num_processes, 1)) * forward_action_id)
                action = action.long()
            elif actor_type == "forward-plus":
                action = torch.Tensor(np.ones((num_processes, 1)) * forward_action_id)
                collision_mask = prev_collision > 0
                action[collision_mask] = turn_action_id
                action = action.long()
            elif actor_type == "frontier":
                # This assumes that num_processes = 1
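                assert num_processes == 1, "frontier agent assumes a single process"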
                occ_map = obs["highres_coarse_occupancy"][0].cpu().numpy()
                occ_map = occ_map.transpose(1, 2, 0)
                occ_map = np.ascontiguousarray(occ_map)
                occ_map = occ_map.astype(np.uint8)
                action = frontier_agent.act(
                    occ_map, delta_ego[0].cpu().numpy(), prev_collision[0].item()
                )
                action = torch.Tensor([[action]]).long()

            obs, reward, done, infos = envs.step(action)
            # Process the updated observations from the environment
            obs_im, obs_sm, obs_lm = get_obs(obs)
            obs_collns = obs["collisions"]

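            # Accumulate odometry; the frontier agent consumes per-step egocentric deltas.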
            obs_odometer_curr = process_odometer(obs["delta"])
            if actor_type == "frontier":
                delta_ego = compute_egocentric_coors(
                    obs_odometer_curr, obs_odometer, occ_map_scale,
                )  # (N, 3) --- (dx_ego, dy_ego, dt_ego)

            # Masks stay 1 throughout: the episode never terminates during exploration.
            masks = torch.ones(num_processes, 1, device=device)
            obs_odometer = obs_odometer + obs_odometer_curr

            # The episode must not terminate during the exploration phase.
            assert not done[0]

            # Update collisions metric
            for pr in range(num_processes):
                per_proc_collisions[pr] += obs_collns[pr, 0].item()

            prev_collision = obs_collns
            prev_action = action

            # Sanity checks: exploration must finish exactly on the final step.
            if step == num_steps_exp - 1:
                assert infos[0]["finished_exploration"]
            else:
                assert not infos[0]["finished_exploration"]
                exploration_topdown_map = infos[0]["topdown_map"]

        # =================================================================
        # ===================== Navigation evaluation =====================
        # =================================================================

        # Record exploration statistics
        for pr in range(num_processes):
            episode_environment_statistics.append(infos[pr]["environment_statistics"])
            exp_area_covered.append(infos[pr]["seen_area"])
            exp_collisions.append(per_proc_collisions[pr])

        # gather statistics for visualization
        per_proc_rgb = [[] for _ in range(num_processes)]
        per_proc_depth = [[] for _ in range(num_processes)]
        per_proc_fine_occ = [[] for _ in range(num_processes)]
        per_proc_coarse_occ = [[] for _ in range(num_processes)]
        per_proc_topdown_map = [[] for _ in range(num_processes)]
        per_proc_planner_vis = [[] for _ in range(num_processes)]
        per_proc_gt_topdown_map = [[] for _ in range(num_processes)]
        per_proc_initial_planner_vis = [[] for _ in range(num_processes)]
        per_proc_exploration_topdown_map = [[] for _ in range(num_processes)]

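        # Output resolution (in pixels) for the visualization frames.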
        WIDTH, HEIGHT = 300, 300

        nav_policy.reset()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



