in adanet/core/evaluator.py [0:0]
def evaluate(self, sess, ensemble_metrics):
"""Evaluates the given AdaNet objectives on the data from `input_fn`.
The candidates are fed the same batches of features and labels as
provided by `input_fn`, and their losses are computed and summed over
`steps` batches.
Args:
sess: `Session` instance with most recent variable values loaded.
ensemble_metrics: A list dictionaries of `tf.metrics` for each candidate
ensemble.
Returns:
List of evaluated metrics.
"""
  evals_completed = 0
  if self.steps is None:
    logging_frequency = 1000
  elif self.steps < 10:
    logging_frequency = 1
  else:
    logging_frequency = math.floor(self.steps / 10.)
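  # `tf.metrics` entries are (value, update_op) tuples; the update ops
  # accumulate state in local variables, so reset those accumulators
  # before evaluation begins.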
  objective_metrics = [em[self._metric_name] for em in ensemble_metrics]
  sess.run(tf_compat.v1.local_variables_initializer())
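  # Run the metric update ops until `steps` batches have been evaluated,
  # or until the input pipeline is exhausted when `steps` is None.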
  while True:
    if self.steps is not None and evals_completed == self.steps:
      break
    try:
      sess.run(objective_metrics)
      evals_completed += 1
      if (evals_completed % logging_frequency == 0 or
          self.steps == evals_completed):
        logging.info("Ensemble evaluation [%d/%s]", evals_completed,
                     self.steps or "??")
    except tf.errors.OutOfRangeError:
      logging.info("Encountered end of input after %d evaluations",
                   evals_completed)
      break
  # `tf.metrics` tuples are (value, update_op); fetching only the first
  # element reads the accumulated value without updating metric state.
  return sess.run([metric[0] for metric in objective_metrics])
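
Example usage (a minimal sketch, not part of evaluator.py): the snippet below builds one dictionary of `tf.metrics`-style (value, update_op) pairs per candidate ensemble and passes the list to `evaluate`. The `adanet.Evaluator` constructor arguments shown and the "adanet_loss" metric key (assumed to match the evaluator's default metric name) are illustrative assumptions.

# Hypothetical usage sketch; the "adanet_loss" key and constructor
# arguments are assumptions for illustration, not the canonical API.
import adanet
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def input_fn():
  features = {"x": tf.constant([[1.], [2.], [3.], [4.]])}
  labels = tf.constant([[1.], [0.], [1.], [1.]])
  return features, labels

_, labels = input_fn()
candidate_predictions = [
    tf.constant([[.8], [.2], [.7], [.9]]),  # candidate ensemble #1
    tf.constant([[.6], [.4], [.5], [.8]]),  # candidate ensemble #2
]

# One dictionary per candidate ensemble; each value is the
# (value, update_op) tuple returned by `tf.metrics.*`.
ensemble_metrics = [
    {"adanet_loss": tf.metrics.mean_squared_error(labels, predictions)}
    for predictions in candidate_predictions
]

evaluator = adanet.Evaluator(input_fn=input_fn, steps=1)
with tf.Session() as sess:
  adanet_losses = evaluator.evaluate(sess, ensemble_metrics)
  # One scalar per candidate; lower is better for a loss objective.
  print(adanet_losses)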