def _get_evaluation_result()

in tensorboard_plugin/tensorboard_plugin_fairness_indicators/plugin.py


  def _get_evaluation_result(self, request):
    """Serves the TFMA evaluation result for the requested run as JSON.

    Args:
      request: A werkzeug request carrying a `run` query parameter.

    Returns:
      A JSON response with slicing metrics converted to the Fairness
      Indicators UI input format, or an empty list on failure.
    """
    run = request.args.get('run')
    try:
      # The run name may arrive as bytes; decode it to text on a best-effort
      # basis.
      run = six.ensure_text(run)
    except (UnicodeDecodeError, AttributeError):
      pass

    data = []
    try:
      # The plugin's summary tensor stores the path to the TFMA evaluation
      # result directory; read it back and load the result from disk.
      eval_result_output_dir = six.ensure_text(
          self._multiplexer.Tensors(run, FairnessIndicatorsPlugin.plugin_name)
          [0].tensor_proto.string_val[0])
      eval_result = tfma.load_eval_result(output_path=eval_result_output_dir)
      # TODO(b/141283811): Allow users to choose different model output names
      # and class keys in case of multi-output and multi-class model.
      data = widget_view.convert_slicing_metrics_to_ui_input(
          eval_result.slicing_metrics)
    except (KeyError, json_format.ParseError) as error:
      logging.info('Error while fetching evaluation data: %s', error)
    return http_util.Respond(request, data, content_type='application/json')
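For context, this handler reads back a directory path that was previously recorded in the TensorBoard event logs as a string tensor summary under this plugin's name. The sketch below shows the writer side, following the demo usage documented for this plugin; the log directory and eval result path are placeholder values, and the exact summary_v2.FairnessIndicators signature should be verified against the installed package version. The handler itself additionally assumes plugin.py's module-level imports (six, tfma, widget_view, json_format, http_util, logging) and is typically registered under a route such as '/get_evaluation_result', queried with a `?run=` parameter.

  import tensorflow as tf
  from tensorboard_plugin_fairness_indicators import summary_v2

  # Record the TFMA eval result directory in the event logs so that
  # _get_evaluation_result() can later recover it from the summary tensor.
  writer = tf.summary.create_file_writer('./logs')  # placeholder log dir
  with writer.as_default():
    summary_v2.FairnessIndicators('./eval_result_output_dir', step=1)
  writer.close()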