def __getitem__()

in dataset/co3d_dataset.py


    def __getitem__(self, index):
        assert index < len(
            self.frame_annots
        ), f"index {index} out of range {len(self.frame_annots)}"

        entry = self.frame_annots[index]["frame_annotation"]
        point_cloud = self.seq_annots[entry.sequence_name].point_cloud
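        # Always-populated fields: frame/sequence identifiers, the original image
        # size, and the per-sequence quality scores (the point cloud score is set
        # only when the sequence has a point cloud).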
        frame_data = FrameData(
            frame_number=_safe_as_tensor(entry.frame_number, torch.long),
            frame_timestamp=_safe_as_tensor(entry.frame_timestamp, torch.float),
            sequence_name=entry.sequence_name,
            sequence_category=self.seq_annots[entry.sequence_name].category,
            # original image size
            image_size_hw=_safe_as_tensor(entry.image.size, torch.long),
            camera_quality_score=_safe_as_tensor(
                self.seq_annots[entry.sequence_name].viewpoint_quality_score,
                torch.float,
            ),
            point_cloud_quality_score=_safe_as_tensor(
                point_cloud.quality_score, torch.float
            )
            if point_cloud is not None
            else None,
        )

        # The rest of the fields are optional
        frame_data.frame_type = self._get_frame_type(self.frame_annots[index])

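        # Foreground probability map, its path and the bounding boxes; clamp_bbox_xyxy
        # is reused below to crop the image, depth map and camera consistently.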
        (
            frame_data.fg_probability,
            frame_data.mask_path,
            frame_data.bbox_xywh,
            clamp_bbox_xyxy,
        ) = self._load_crop_fg_probability(entry)

        scale = 1.0
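        # Optionally load the cropped RGB image and crop mask; `scale` records the
        # rescaling factor and is used below when building the camera.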
        if self.load_images:
            (
                frame_data.image_rgb,
                frame_data.image_path,
                frame_data.mask_crop,
                scale,
            ) = self._load_crop_images(
                entry, frame_data.fg_probability, clamp_bbox_xyxy
            )

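        # Optionally load the depth map together with its path and depth mask.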
        if self.load_depths and entry.depth is not None:
            (
                frame_data.depth_map,
                frame_data.depth_path,
                frame_data.depth_mask,
            ) = self._load_mask_depth(entry, clamp_bbox_xyxy, frame_data.fg_probability)

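        # Build the PyTorch3D camera for this frame, accounting for the rescaling
        # and cropping applied above.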
        frame_data.camera = self._get_pytorch3d_camera(
            entry,
            scale,
            clamp_bbox_xyxy,
        )

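        # Attach the sequence-level point cloud: its path and, if the file exists
        # on disk, the points loaded with at most max_points points.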
        if point_cloud is not None:
            frame_data.sequence_point_cloud_path = os.path.join(
                self.dataset_root, point_cloud.path
            )
            frame_data.sequence_point_cloud = (
                _load_pointcloud(
                    frame_data.sequence_point_cloud_path,
                    max_points=self.max_points,
                )
                if os.path.isfile(frame_data.sequence_point_cloud_path)
                else None
            )

        return frame_data
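
A minimal usage sketch (an illustration, not taken from the source): `dataset` is assumed to be an already-constructed instance of the dataset class in dataset/co3d_dataset.py, and unset optional FrameData fields are assumed to default to None. Indexing the dataset invokes __getitem__ above:

    # Hypothetical usage: `dataset` is an already-constructed dataset instance
    # (constructor arguments omitted here).
    frame = dataset[0]  # runs __getitem__(0)
    print(frame.sequence_name, int(frame.frame_number))
    if frame.image_rgb is not None:   # populated only when load_images is True
        print(frame.image_rgb.shape)
    if frame.depth_map is not None:   # populated only when load_depths is True and depth exists
        print(frame.depth_map.shape)
    camera = frame.camera             # PyTorch3D camera built by _get_pytorch3d_camera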