def transform_annotations()

in meshrcnn/data/meshrcnn_transforms.py [0:0]


    def transform_annotations(self, annotation, transforms, image_size):
        """
        Apply image transformations to the annotations.

        After this method, the box mode will be set to XYXY_ABS.
        """
        bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
        # Note that bbox is 1d (per-instance bounding box)
        annotation["bbox"] = transforms.apply_box([bbox])[0]
        annotation["bbox_mode"] = BoxMode.XYXY_ABS

        # each instance contains 1 mask
        if self.mask_on and "segmentation" in annotation:
            annotation["segmentation"] = self._process_mask(annotation["segmentation"], transforms)
        else:
            annotation.pop("segmentation", None)

        # camera parameters: K is [focal_length, px, py], with the principal point
        # reset to the image center; R and t are converted to tensors
        h, w = image_size
        annotation["K"] = [annotation["K"][0], w / 2.0, h / 2.0]
        annotation["R"] = torch.tensor(annotation["R"])
        annotation["t"] = torch.tensor(annotation["t"])

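        # dz: depth-extent target for the Z-prediction head, computed from the mesh and camera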
        if self.zpred_on and "mesh" in annotation:
            annotation["dz"] = self._process_dz(
                annotation["mesh"],
                transforms,
                focal_length=annotation["K"][0],
                R=annotation["R"],
                t=annotation["t"],
            )
        else:
            annotation.pop("dz", None)

        # each instance contains 1 voxel
        if self.voxel_on and "voxel" in annotation:
            annotation["voxel"] = self._process_voxel(
                annotation["voxel"], transforms, R=annotation["R"], t=annotation["t"]
            )
        else:
            annotation.pop("voxel", None)

        # each instance contains 1 mesh
        if self.mesh_on and "mesh" in annotation:
            annotation["mesh"] = self._process_mesh(
                annotation["mesh"], transforms, R=annotation["R"], t=annotation["t"]
            )
        else:
            annotation.pop("mesh", None)

        return annotation
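
A minimal sketch of how one instance annotation might be pushed through this method, using the detectron2 transform utilities that the mapper in this file also builds on. The mapper class name (MeshRCNNMapper), its construction, and the exact annotation fields shown (Pix3D-style K, R, t) are assumptions drawn from the code above, not a verified call sequence.

    import numpy as np
    from detectron2.data import transforms as T
    from detectron2.structures import BoxMode

    # Deterministic resize of a dummy 480x640 image; `transforms` records the applied ops.
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    image, transforms = T.apply_transform_gens(
        [T.ResizeShortestEdge(short_edge_length=240, max_size=320)], image
    )

    # One Pix3D-style instance annotation (field layout assumed from the code above).
    annotation = {
        "bbox": [100.0, 80.0, 200.0, 160.0],  # XYWH in the original image
        "bbox_mode": BoxMode.XYWH_ABS,
        "K": [600.0, 320.0, 240.0],           # [focal_length, px, py]
        "R": [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
        "t": [0.0, 0.0, 1.5],
    }

    # The bbox step performed by transform_annotations, shown standalone:
    bbox_xyxy = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
    print(transforms.apply_box([bbox_xyxy])[0])  # box coordinates in the resized image

    # Hypothetical full call through the mapper (cfg construction omitted):
    # mapper = MeshRCNNMapper(cfg, is_train=True)
    # annotation = mapper.transform_annotations(annotation, transforms, image.shape[:2])

After the call, annotation["bbox"] is in XYXY_ABS coordinates of the transformed image, annotation["K"][1:] holds the new image center, and any modality that is disabled or absent (segmentation, dz, voxel, mesh) has been popped from the dict.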