in tensorflow_similarity/classification_metrics/precision.py [0:0]
def compute(self,
            tp: FloatTensor,
            fp: FloatTensor,
            tn: FloatTensor,
            fn: FloatTensor,
            count: int) -> FloatTensor:
"""Compute the classification metric.
The `compute()` method supports computing the metric for a set of
values, where each value represents the counts at a specific distance
threshold.
Args:
tp: A 1D FloatTensor containing the count of True Positives at each
distance threshold.
fp: A 1D FloatTensor containing the count of False Positives at
each distance threshold.
tn: A 1D FloatTensor containing the count of True Negatives at each
distance threshold.
fn: A 1D FloatTensor containing the count of False Negatives at
each distance threshold.
count: The total number of queries
Returns:
A 1D FloatTensor containing the metric at each distance threshold.
"""
    p: FloatTensor = tf.math.divide_no_nan(tp, tp + fp)
    # If all queries return empty result sets we have a recall of zero. In
    # this case the precision should be 1.0 (see
    # https://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-ranked-retrieval-results-1.html#fig:precision-recall).
    # The following accounts for this and sets the first precision value to
    # 1.0 if the first recall and precision are both zero.
    if (tp + fp)[0] == 0.0 and len(p) > 1:
        initial_precision = tf.concat(
            [tf.constant([1.0]), tf.zeros(len(p) - 1)],
            axis=0
        )
        p = p + initial_precision
    return p
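

# Usage sketch (not part of the original file): a minimal illustration of how
# compute() behaves at a few distance thresholds. The import path for the
# Precision class and the count values below are assumptions made for the
# example, not taken from the source.
import tensorflow as tf

from tensorflow_similarity.classification_metrics import Precision

# Counts at three increasing distance thresholds for 10 queries.
tp = tf.constant([0.0, 4.0, 7.0])  # true positives per threshold
fp = tf.constant([0.0, 1.0, 3.0])  # false positives per threshold
tn = tf.constant([5.0, 4.0, 2.0])  # true negatives per threshold
fn = tf.constant([5.0, 1.0, 0.0])  # false negatives per threshold

precision = Precision().compute(tp=tp, fp=fp, tn=tn, fn=fn, count=10)
# The first threshold returns no matches (tp + fp == 0), so its precision is
# pinned to 1.0; the remaining values are tp / (tp + fp).
print(precision)  # approximately [1.0, 0.8, 0.7]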