datasets/replica.py [61:96]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    episode_Rt.append(torch.Tensor(cameras[i]['Rt']))
            episode_Rt = torch.stack(episode_Rt, dim=0)

            # drop trailing frames so the episode splits evenly into chunks of seq_len * step
            trim = episode_Rt.shape[0] % (self.seq_len * self.step)
            episode_Rt = episode_Rt[: episode_Rt.shape[0] - trim]
            Rt.append(episode_Rt)

        Rt = torch.stack(Rt, dim=0)

        # split each trimmed episode into `step` interleaved subsequences, each seq_len poses long with frames spaced `step` apart
        Rt = Rt.view(-1, self.seq_len, self.step, 4, 4).permute(0, 2, 1, 3, 4).reshape(-1, self.seq_len, 4, 4)

        if self.center is not None:
            Rt = normalize_trajectory(Rt, center=self.center, normalize_rotation=self.normalize_rotation)

        if self.single_sample_per_trajectory:
            # randomly select a single point along each trajectory
            selected_indices = torch.multinomial(torch.ones(Rt.shape[:2]), num_samples=1).squeeze()
            bool_mask = torch.eye(self.seq_len)[selected_indices].bool()
            Rt = Rt[bool_mask].unsqueeze(1)

        if self.rot_aug:
            for i in range(Rt.shape[0]):
                Rt[i] = random_rotation_augment(Rt[i])
        return Rt

    def __len__(self):
        if self.samples_per_epoch:
            return self.samples_per_epoch
        else:
            trajectory_len = self.seq_len * self.step
            n_val_trajectories = int(len(self.seq_idxs) * math.floor(self.episode_len / trajectory_len))
            return n_val_trajectories

    def __getitem__(self, idx):
        random.seed()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
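
The view/permute/reshape shared by both files is easy to misread. Below is a standalone sketch (toy sizes and frame-index tags are made up for illustration, not from the repo) showing how one trimmed episode of seq_len * step poses becomes `step` interleaved subsequences, each sampled every `step`-th frame:

import torch

# Toy version of the view/permute/reshape above: one "episode" of
# seq_len * step poses is split into `step` interleaved subsequences,
# each seq_len poses long with frames spaced `step` apart.
seq_len, step = 4, 3

# stand-in poses: tag each 4x4 matrix with its frame index at [0, 0]
Rt = torch.eye(4).repeat(seq_len * step, 1, 1)
Rt[:, 0, 0] = torch.arange(seq_len * step, dtype=torch.float32)
Rt = Rt.unsqueeze(0)  # (1, seq_len * step, 4, 4), like the stacked episodes

out = (
    Rt.view(-1, seq_len, step, 4, 4)
    .permute(0, 2, 1, 3, 4)
    .reshape(-1, seq_len, 4, 4)
)

print(out.shape)        # torch.Size([3, 4, 4, 4]): step trajectories of seq_len poses
print(out[:, :, 0, 0])  # rows are [0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11]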



datasets/vizdoom.py [61:96]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    episode_Rt.append(torch.Tensor(cameras[i]['Rt']))
            episode_Rt = torch.stack(episode_Rt, dim=0)

            # drop trailing frames so the episode splits evenly into chunks of seq_len * step
            trim = episode_Rt.shape[0] % (self.seq_len * self.step)
            episode_Rt = episode_Rt[: episode_Rt.shape[0] - trim]
            Rt.append(episode_Rt)

        Rt = torch.stack(Rt, dim=0)

        # split each trimmed episode into `step` interleaved subsequences, each seq_len poses long with frames spaced `step` apart
        Rt = Rt.view(-1, self.seq_len, self.step, 4, 4).permute(0, 2, 1, 3, 4).reshape(-1, self.seq_len, 4, 4)

        if self.center is not None:
            Rt = normalize_trajectory(Rt, center=self.center, normalize_rotation=self.normalize_rotation)

        if self.single_sample_per_trajectory:
            # randomly select a single point along each trajectory
            selected_indices = torch.multinomial(torch.ones(Rt.shape[:2]), num_samples=1).squeeze()
            bool_mask = torch.eye(self.seq_len)[selected_indices].bool()
            Rt = Rt[bool_mask].unsqueeze(1)

        if self.rot_aug:
            for i in range(Rt.shape[0]):
                Rt[i] = random_rotation_augment(Rt[i])
        return Rt

    def __len__(self):
        if self.samples_per_epoch:
            return self.samples_per_epoch
        else:
            trajectory_len = self.seq_len * self.step
            n_val_trajectories = int(len(self.seq_idxs) * math.floor(self.episode_len / trajectory_len))
            return n_val_trajectories

    def __getitem__(self, idx):
        random.seed()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
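
The single_sample_per_trajectory branch is likewise identical in both files: a uniform multinomial draw plus a one-hot boolean mask. The sketch below mirrors those lines with made-up sizes (n_traj, seq_len, and the frame-index tags are illustrative only):

import torch

# Toy version of the single_sample_per_trajectory branch: draw one pose
# uniformly at random from each trajectory via a one-hot boolean mask.
n_traj, seq_len = 5, 4
Rt = torch.eye(4).repeat(n_traj, seq_len, 1, 1)  # (n_traj, seq_len, 4, 4)
Rt[:, :, 0, 0] = torch.arange(seq_len).float()   # tag each pose with its frame index

# uniform weights, so multinomial picks one frame index per trajectory
selected = torch.multinomial(torch.ones(n_traj, seq_len), num_samples=1).squeeze()
bool_mask = torch.eye(seq_len)[selected].bool()  # (n_traj, seq_len), one-hot rows

picked = Rt[bool_mask].unsqueeze(1)              # (n_traj, 1, 4, 4)
print(selected)                                  # e.g. tensor([2, 0, 3, 1, 2])
print(picked[:, 0, 0, 0])                        # same indices, read back from the tags
# caveat: with n_traj == 1, .squeeze() yields a 0-d index and the (seq_len,)
# mask no longer lines up with Rt's leading dimension, so the indexing fails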



