def evaluate_tflite()

in tensorflow_examples/lite/model_maker/core/task/recommendation.py
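
This method runs an exported TFLite model over an evaluation dataset one
example at a time and reports global top-k recall. The listing is a method of
the recommendation task class; `np` (NumPy), `collections`, and the Model
Maker helpers `model_util`, `data_util`, and `_metrics` are assumed to be
module-level imports in recommendation.py.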


  def evaluate_tflite(self, tflite_filepath, data):
    """Evaluates the tflite model.

    The data is padded to required length, and multiple metrics are evaluated.

    Args:
      tflite_filepath: File path to the TFLite model.
      data: Data to be evaluated.

    Returns:
      Dict of (metric, value) pairs, the evaluation result of the TFLite model.
    """
    label_name = self.input_spec.label_feature.feature_name
    lite_runner = model_util.get_lite_runner(tflite_filepath, self.model_spec)
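    # Batch size 1: the TFLite runner below is invoked on one example at a time.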
    ds = data.gen_dataset(batch_size=1, is_training=False)

    max_output_size = data.max_vocab_id + 1  # +1 because 0 is reserved for OOV.
    eval_top_k = self.model_hparams.eval_top_k
    metrics = [
        _metrics.GlobalRecall(top_k=k, name=f'Global_Recall/Recall_{k}')
        for k in eval_top_k
    ]
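
    # Run the TFLite model on each example and scatter its top-k scores into
    # a dense prediction vector over the full vocabulary.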
    for feature, y_true in data_util.generate_elements(ds):
      feature.pop(label_name)
      x = feature
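      # The TFLite runner returns the top-k candidate ids with their scores.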
      ids, scores = lite_runner.run(x)

      # y_true: shape [1, 1].
      # y_pred: shape [1, max_output_size]; only the entries at the top-k
      # predicted ids receive their scores, everything else stays 0.
      y_pred = np.zeros([1, max_output_size])
      for i, score in zip(ids, scores):
        if i in data.vocab:  # Only set the score if the id is in the vocab.
          y_pred[0, i] = score

      # Update metrics.
      for m in metrics:
        m.update_state(y_true, y_pred)
    result = collections.OrderedDict([(m.name, m.result()) for m in metrics])
    return result
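
A minimal, hypothetical driver for this method; `model` and `test_data` are
assumed to come from the standard Model Maker recommendation workflow, and the
export path is illustrative:

  # Hypothetical example: export the trained task to TFLite, then evaluate
  # the exported file against the held-out split.
  model.export(export_dir='/tmp/export', tflite_filename='recommendation.tflite')
  metrics = model.evaluate_tflite('/tmp/export/recommendation.tflite', test_data)
  for name, value in metrics.items():
    print(f'{name}: {value}')  # e.g. Global_Recall/Recall_1, Recall_5, ...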