def postprocess()

in community-content/vertex_model_garden/model_oss/detectron2/handler.py


  def postprocess(self, inference_result: List[Any]) -> List[Any]:
    """Post-processes inference results into JSON-serialized responses."""
    response_list = []
    print("Number of inference items:", len(inference_result))
    for inference_item in inference_result:
      # Move predictions to CPU so the tensors can be converted to Python lists.
      predictions = inference_item["instances"].to("cpu")
      print("Predictions are:", predictions)
      # Extract boxes, scores, and classes when the model produced them.
      boxes = None
      if predictions.has("pred_boxes"):
        boxes = predictions.pred_boxes.tensor.numpy().tolist()
      scores = None
      if predictions.has("scores"):
        scores = predictions.scores.numpy().tolist()
      classes = None
      if predictions.has("pred_classes"):
        classes = predictions.pred_classes.numpy().tolist()
      masks_rle = None
      if predictions.has("pred_masks"):
        # Run-length encode the masks; otherwise the mask output becomes huge.
        # pycocotools' RLE encoder expects Fortran-ordered uint8 arrays, so
        # cast each mask before encoding.
        masks_rle = [
            mask_util.encode(np.asfortranarray(np.asarray(mask, dtype=np.uint8)))
            for mask in predictions.pred_masks
        ]
        for rle in masks_rle:
          # `counts` is returned as bytes; decode it so the dict is JSON-serializable.
          rle["counts"] = rle["counts"].decode("utf-8")
      response = {
          "classes": classes,
          "scores": scores,
          "boxes": boxes,
          "masks_rle": masks_rle,
      }
      # Serialize each item's predictions as a JSON string.
      response_list.append(json.dumps(response))
    print("response_list: ", response_list)
    return response_list
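
For context, the JSON strings returned above can be turned back into arrays on the client side. Below is a minimal sketch, assuming pycocotools is installed and `response_item` is one string from the list returned by postprocess(); the helper name `decode_response` is illustrative and not part of the handler.

import json

from pycocotools import mask as mask_util


def decode_response(response_item: str) -> dict:
  """Parses one serialized prediction and decodes its RLE masks, if any."""
  prediction = json.loads(response_item)
  if prediction["masks_rle"]:
    # Re-encode `counts` to bytes, then expand each RLE back into an
    # H x W uint8 binary mask.
    prediction["masks"] = [
        mask_util.decode(
            {"size": rle["size"], "counts": rle["counts"].encode("utf-8")}
        )
        for rle in prediction["masks_rle"]
    ]
  return prediction

Keeping the masks run-length encoded in the response and decoding them only on the client keeps the serialized payload small compared to sending full per-pixel masks.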