in model_card_toolkit/utils/tfx_util.py [0:0]
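
# --- Dependency sketch (assumed; not shown in this excerpt) ---
# tfx_util.py pulls in these names elsewhere in the file. The exact import
# paths and the choice of absl logging are assumptions, and _TYPE_FIELD_MAP is
# reconstructed from how proto-to-dict conversion renders tfma's ArrayValue:
# 'dataType' names the populated repeated-value field in camelCase.
from typing import Any, Dict

from absl import logging

import tensorflow_model_analysis as tfma

from model_card_toolkit import model_card as model_card_module

_TYPE_FIELD_MAP = {
    'INT32': 'int32Values',
    'INT64': 'int64Values',
    'FLOAT32': 'float32Values',
    'FLOAT64': 'float64Values',
    'BYTES': 'bytesValues',
}
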
def annotate_eval_result_metrics(model_card: model_card_module.ModelCard,
                                 eval_result: tfma.EvalResult):
  """Annotates model_card's PerformanceMetrics for every metric in eval_result.

  Args:
    model_card: The model card object.
    eval_result: A `tfma.EvalResult`.

  Raises:
    ValueError: if eval_result is improperly formatted.
  """

  def _parse_array_value(array: Dict[str, Any]) -> str:
    data_type = array['dataType']
    if data_type in _TYPE_FIELD_MAP:
      type_field = _TYPE_FIELD_MAP[data_type]
      return ', '.join([str(value) for value in array[type_field]])
    else:
      logging.warning('Received unexpected array %s', str(array))
      return ''
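
  # Example (illustrative values): _parse_array_value renders
  #   {'dataType': 'FLOAT64', 'shape': ['2'], 'float64Values': [0.25, 0.75]}
  # as the string '0.25, 0.75'.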

  for slice_repr, metrics_for_slice in (
      eval_result.get_metrics_for_all_slices().items()):
    # Parse the slice name.
    if not isinstance(slice_repr, tuple):
      raise ValueError(
          f'Expected EvalResult slices to be tuples; found {type(slice_repr)}')
    slice_name = '_X_'.join(f'{a}_{b}' for a, b in slice_repr)
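    # e.g. a slice_repr of (('gender', 'female'), ('age', '25')) yields the
    # slice name 'gender_female_X_age_25'; the overall (empty-tuple) slice
    # yields ''.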
    for metric_name, metric_value in metrics_for_slice.items():
      # Parse the metric value.
      parsed_value = ''
      if 'doubleValue' in metric_value:
        parsed_value = metric_value['doubleValue']
      elif 'boundedValue' in metric_value:
        parsed_value = metric_value['boundedValue']['value']
      elif 'arrayValue' in metric_value:
        parsed_value = _parse_array_value(metric_value['arrayValue'])
      else:
        logging.warning(
            'Expected doubleValue, boundedValue, or arrayValue; found %s',
            metric_value.keys())
      # Compare against the '' sentinel rather than truthiness, so that
      # legitimate zero-valued metrics (e.g. a doubleValue of 0.0) are kept.
      if parsed_value != '':
        # Create the PerformanceMetric and append it to the ModelCard.
        metric = model_card_module.PerformanceMetric(
            type=metric_name, value=str(parsed_value), slice=slice_name)
        model_card.quantitative_analysis.performance_metrics.append(metric)
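
# Usage sketch (hypothetical output path; assumes an EvalResult previously
# written by tfma.run_model_analysis or a TFX Evaluator component):
#   eval_result = tfma.load_eval_result('/path/to/eval_output')
#   model_card = model_card_module.ModelCard()
#   annotate_eval_result_metrics(model_card, eval_result)
#   # model_card.quantitative_analysis.performance_metrics now holds one
#   # PerformanceMetric per (slice, metric) pair.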