in tensorflow_ranking/python/metrics.py [0:0]
def make_ranking_metric_fn(metric_key,
weights_feature_name=None,
topn=None,
name=None,
gain_fn=_DEFAULT_GAIN_FN,
rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN,
**kwargs):
  """Factory method to create a ranking metric function.

  Args:
    metric_key: A key in `RankingMetricKey`.
    weights_feature_name: A `string` specifying the name of the weights feature
      in `features` dict.
    topn: An `integer` specifying the cutoff of how many items are considered in
      the metric.
    name: A `string` used as the name for this metric.
    gain_fn: (function) Transforms labels. A method to calculate gain parameters
      used in the definitions of the DCG and NDCG metrics, where the input is
      the relevance label of the item. The gain is often defined to be of the
      form 2^label-1.
    rank_discount_fn: (function) The rank discount function. A method to define
      the discount parameters used in the definitions of DCG and NDCG metrics,
      where the input in the rank of item. The discount function is commonly
      defined to be of the form log(rank+1).
    **kwargs: Other keyword arguments (e.g. alpha, seed).

  Returns:
    A metric fn with the following Args:
    * `labels`: A `Tensor` of the same shape as `predictions` representing
    graded relevance.
    * `predictions`: A `Tensor` with shape [batch_size, list_size]. Each value
    is the ranking score of the corresponding example.
    * `features`: A dict of `Tensor`s that contains all features.

  Raises:
    ValueError: If `metric_key` is not one of the supported
      `RankingMetricKey`s.
  """

  def _get_weights(features):
    """Get weights tensor from features and reshape it to 2-D if necessary."""
    weights = None
    if weights_feature_name:
      weights = tf.convert_to_tensor(value=features[weights_feature_name])
      # Convert weights to a 2-D Tensor.
      weights = utils.reshape_to_2d(weights)
    return weights

  def _average_relevance_position_fn(labels, predictions, features):
    """Returns average relevance position as the metric."""
    return average_relevance_position(
        labels, predictions, weights=_get_weights(features), name=name)

  def _mean_reciprocal_rank_fn(labels, predictions, features):
    """Returns mean reciprocal rank as the metric."""
    return mean_reciprocal_rank(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _normalized_discounted_cumulative_gain_fn(labels, predictions, features):
    """Returns normalized discounted cumulative gain as the metric."""
    return normalized_discounted_cumulative_gain(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name,
        gain_fn=gain_fn,
        rank_discount_fn=rank_discount_fn)

  def _discounted_cumulative_gain_fn(labels, predictions, features):
    """Returns discounted cumulative gain as the metric."""
    return discounted_cumulative_gain(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name,
        gain_fn=gain_fn,
        rank_discount_fn=rank_discount_fn)

  def _precision_fn(labels, predictions, features):
    """Returns precision as the metric."""
    return precision(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _recall_fn(labels, predictions, features):
    """Returns recall as the metric."""
    return recall(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _mean_average_precision_fn(labels, predictions, features):
    """Returns mean average precision as the metric."""
    return mean_average_precision(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _precision_ia_fn(labels, predictions, features):
    """Returns an intent-aware precision as the metric."""
    return precision_ia(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name)

  def _ordered_pair_accuracy_fn(labels, predictions, features):
    """Returns ordered pair accuracy as the metric."""
    return ordered_pair_accuracy(
        labels, predictions, weights=_get_weights(features), name=name)

  def _alpha_discounted_cumulative_gain_fn(labels, predictions, features):
    """Returns alpha discounted cumulative gain as the metric."""
    return alpha_discounted_cumulative_gain(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name,
        rank_discount_fn=rank_discount_fn,
        **kwargs)

  def _binary_preference_fn(labels, predictions, features):
    """Returns binary preference as the metric."""
    return binary_preference(
        labels,
        predictions,
        weights=_get_weights(features),
        topn=topn,
        name=name,
        **kwargs)

  # Dispatch table mapping each supported metric key to its closure. All
  # closures share `weights_feature_name`/`topn`/`name` captured above.
  metric_fn_dict = {
      RankingMetricKey.ARP: _average_relevance_position_fn,
      RankingMetricKey.MRR: _mean_reciprocal_rank_fn,
      RankingMetricKey.NDCG: _normalized_discounted_cumulative_gain_fn,
      RankingMetricKey.DCG: _discounted_cumulative_gain_fn,
      RankingMetricKey.RECALL: _recall_fn,
      RankingMetricKey.PRECISION: _precision_fn,
      RankingMetricKey.MAP: _mean_average_precision_fn,
      RankingMetricKey.PRECISION_IA: _precision_ia_fn,
      RankingMetricKey.ORDERED_PAIR_ACCURACY: _ordered_pair_accuracy_fn,
      RankingMetricKey.ALPHA_DCG: _alpha_discounted_cumulative_gain_fn,
      RankingMetricKey.BPREF: _binary_preference_fn,
  }
  # Validate explicitly rather than with `assert`: assertions are stripped
  # under `python -O`, which would otherwise turn a bad key into an opaque
  # KeyError on the lookup below.
  if metric_key not in metric_fn_dict:
    raise ValueError('metric_key %s not supported.' % metric_key)
  return metric_fn_dict[metric_key]