in meshrcnn/data/meshrcnn_transforms.py [0:0]
def annotations_to_instances(annos, image_size):
    """
    Build an :class:`Instances` object from per-instance annotation dicts.

    Args:
        annos (list[dict]): one annotation dict per instance in the image.
        image_size (tuple): (height, width) of the image.

    Returns:
        Instances: always carries "gt_boxes" and "gt_classes"; additionally
        carries "gt_masks", "gt_K", "gt_voxels", "gt_meshes", and "gt_dz"
        when the corresponding keys are present in `annos`.
    """
    # Convert every box to absolute XYXY before wrapping in a Boxes object.
    converted = [
        BoxMode.convert(anno["bbox"], anno["bbox_mode"], BoxMode.XYXY_ABS)
        for anno in annos
    ]
    target = Instances(image_size)
    gt_boxes = Boxes(converted)
    target.gt_boxes = gt_boxes
    # Clip boxes in place so they lie within the image bounds.
    gt_boxes.clip(image_size)

    target.gt_classes = torch.tensor(
        [anno["category_id"] for anno in annos], dtype=torch.int64
    )

    # NOTE: only the first annotation is inspected for optional keys, so all
    # instances in one image are expected to share the same set of fields.
    def _has(key):
        return len(annos) and key in annos[0]

    if _has("segmentation"):
        target.gt_masks = torch.stack(
            [anno["segmentation"] for anno in annos], dim=0
        )
    if _has("K"):  # per-instance camera intrinsics
        target.gt_K = torch.stack(
            [torch.tensor(anno["K"]) for anno in annos], dim=0
        )
    if _has("voxel"):
        target.gt_voxels = VoxelInstances([anno["voxel"] for anno in annos])
    if _has("mesh"):
        target.gt_meshes = MeshInstances([anno["mesh"] for anno in annos])
    if _has("dz"):
        target.gt_dz = torch.tensor([anno["dz"] for anno in annos])
    return target