# Excerpt: serve() method from
# community-content/vertex_model_garden/model_oss/tfvision/serving/detection.py


  def serve(self,
            images: tf.Tensor,
            key: Optional[tf.Tensor] = None) -> Dict[Text, tf.Tensor]:
    """Cast image to float and run inference.

    Args:
      images: uint8 Tensor of input images. For input type image tensor, the
        shape is [batch_size, None, None, 3], for image_bytes, the shape is
        [batch_size].
      key: Optional string Tensor of shape [batch_size]. If not provided
        output tensors will not contain it as well.

    Returns:
      Tensor holding detection output logits.
    """
    images, anchor_boxes, image_info = self.preprocess(images)

    # Row 1 of image_info is the (height, width) actually fed to the model.
    # We invoke `model.call` rather than `model(...)` to work around a
    # keras.Model saving limitation for layers with multiple inputs; this
    # skips the extra bookkeeping keras performs in `__call__`.
    detections = self.model.call(
        images=images,
        image_shape=image_info[:, 1, :],
        anchor_boxes=anchor_boxes,
        training=False)

    if not self.params.task.model.detection_generator.apply_nms:
      # Without NMS, expose the raw decoded boxes and their scores.
      outputs = {
          'decoded_boxes': detections['decoded_boxes'],
          'decoded_box_scores': detections['decoded_box_scores']
      }
    else:
      # For RetinaNet model, apply export_config.
      if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
        export_config = self.params.task.export_config
        if export_config.output_normalized_coordinates:
          # Undo the preprocessing scale (image_info row 2), then normalize
          # box coordinates to [0, 1] against the original image shape
          # (image_info row 0).
          unscaled_boxes = (
              detections['detection_boxes'] /
              tf.tile(image_info[:, 2:3, :], [1, 1, 2]))
          detections['detection_boxes'] = box_ops.normalize_boxes(
              unscaled_boxes, image_info[:, 0:1, :])

        # Cast num_detections and detection_classes to float. This allows the
        # model inference to work on chain (go/chain) as chain requires
        # floating point outputs.
        if export_config.cast_num_detections_to_float:
          detections['num_detections'] = tf.cast(
              detections['num_detections'], dtype=tf.float32)
        if export_config.cast_detection_classes_to_float:
          detections['detection_classes'] = tf.cast(
              detections['detection_classes'], dtype=tf.float32)

      outputs = {
          name: detections[name]
          for name in ('detection_boxes', 'detection_scores',
                       'detection_classes', 'num_detections')
      }

    if 'detection_masks' in detections:
      outputs['detection_masks'] = detections['detection_masks']

    # Adding AutoML specific outputs.
    if self._label_map_table is not None:
      outputs[automl_constants.DETECTION_CLASSES_AS_TEXT] = (
          self._generate_class_text_output(detections['detection_classes']))

    outputs['image_info'] = image_info
    if key is not None:
      outputs[automl_constants.OUTPUT_KEY_NAME] = key

    return outputs