def batch()

in src/features.py [0:0]


    def batch(self, data, **kwargs) -> torch.Tensor:
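        """Build discretized depth features for a batch of sampled pixels.

        Returns a float tensor of shape (num_samples, n_feat) in which each
        row activates the depth bin(s) of its sample: a hard one-hot encoding
        when window_size == 1, otherwise distance-weighted bin activations
        gathered from a window_size x window_size neighborhood.
        """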
        depths = data[DatasetKeyConstants.depth_image_full]
        sample_indices = data[DatasetKeyConstants.image_sample_indices]

        # both branches bucket depths into n_feat bins of width `step`
        step = 1.0 / self.n_feat
        features = torch.zeros((torch.numel(sample_indices), self.n_feat),
                               dtype=torch.float32, device=self.device)

        if self.window_size == 1:
            # a window of size 1 reduces to a hard one-hot encoding of each
            # sample's own depth
            selected_depths = depths.reshape(-1, depths.shape[-1])[sample_indices]
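            # keep only samples whose depth is below the ignore/sentinel value
            # and map each (assumed normalized) depth to one of n_feat bins,
            # clamping overflow into the last bin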
            mask, _ = torch.nonzero(selected_depths < self.ignore_depth_value, as_tuple=True)
            d_disc = torch.clamp_max((selected_depths[mask] / step).type(torch.int64), self.n_feat - 1)

            # scatter a hard one-hot activation into each valid row's bin
            # (the transpose broadcasts the bin indices against the row mask)
            features[mask, d_disc.T] = 1.
            return features

        else:
            # GPU fast path: a custom CUDA kernel fills the binned features
            # in a single call when one is available
            if self.device != "cpu" and self.cuda_batch is not None:
                self.cuda_batch.fill_disc_depth(features, sample_indices,
                                                torch.zeros(1, dtype=torch.int64, device=self.device), depths,
                                                self.window_size, self.h, self.w,
                                                len(sample_indices), 1,
                                                self.center_id, self.n_feat, self.ignore_depth_value, 2)
            else:
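                # CPU fallback: recover 2-D pixel coordinates from the flat
                # sample indices and walk the window around each pixel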
                cx = sample_indices % self.w
                cy = sample_indices // self.w
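                # normalizer for the linear distance falloff; the +1 keeps
                # corner weights strictly positive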
                max_dist = (self.window_size // 2 + 1) * np.sqrt(2.0)

                for i in range(self.window_size):
                    for j in range(self.window_size):
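                        # weight falls off linearly with distance from the
                        # window center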
                        weight = (1.0 - np.sqrt((i - self.center_id) ** 2 + (j - self.center_id) ** 2) / max_dist)
                        x = torch.clamp(cx - self.center_id + i, min=0, max=self.w - 1)
                        y = torch.clamp(cy - self.center_id + j, min=0, max=self.h - 1)
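                        # read the neighbor's depth at border-clamped
                        # coordinates and map it to a bin index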
                        val = depths[0, y, x].flatten()
                        disc = (val / step).type(torch.int64)
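                        # keep only neighbors with a valid depth and a
                        # non-negative bin index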
                        mask = torch.nonzero(torch.logical_and(val < self.ignore_depth_value, disc >= 0), as_tuple=True)[0]
                        disc = torch.clamp_max(disc[mask], self.n_feat - 1)
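                        # each bin keeps the strongest contribution seen so
                        # far across the window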
                        features[mask, disc] = torch.max(features[mask, disc],
                                                         torch.tensor([weight], dtype=torch.float32,
                                                                      device=features.device))

            if self.d_window_size > 1:
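                # smooth the bin activations along the depth axis with a 1-D
                # convolution (same-length output assuming an odd
                # d_window_size) and clamp back into [0, 1]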
                features_filtered = torch.nn.functional.conv1d(features[:, None, :], self.d_kernel,
                                                               padding=self.d_window_size // 2)
                features_filtered = torch.clamp(features_filtered, 0., 1.)
                features = features_filtered.reshape(-1, features.shape[1])

            return features
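
For reference, the window_size == 1 branch reduces to the self-contained
binning routine below. This is a minimal sketch, assuming a 1-D tensor of
depths normalized to [0, 1]; `one_hot_depth_bins` is an illustrative helper,
not part of the class above.

    import torch

    def one_hot_depth_bins(depths, n_feat, ignore_depth_value):
        # width of each depth bin
        step = 1.0 / n_feat
        features = torch.zeros(depths.numel(), n_feat)
        # rows with a valid (non-sentinel) depth
        valid = torch.nonzero(depths < ignore_depth_value, as_tuple=True)[0]
        # bin index per valid depth, overflow clamped into the last bin
        bins = torch.clamp_max((depths[valid] / step).long(), n_feat - 1)
        features[valid, bins] = 1.0
        return features

    feats = one_hot_depth_bins(torch.tensor([0.05, 0.5, 0.99, 10.0]),
                               n_feat=4, ignore_depth_value=10.0)
    # rows 0-2 are one-hot in bins 0, 2, 3; row 3 stays all zeros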