in tensorflow_fold/blocks/block_compiler.py
def _eval(self, inp, feed_dict, session, tolist, use_while_loop):
"""Implements block.eval()."""
out_type = self.root.output_type
    # Do a dry run to calculate the max depth and the structure of the result.
dry_result, max_depth = self._dry_run(inp)
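    # When a while loop is requested we discard max_depth; with max_depth=None
    # loom builds a dynamic tf.while_loop instead of an unrolled graph.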
if use_while_loop: max_depth = None
    # Walk the terminals of 'dry_result', collecting their types; the
    # tensor-typed positions are the ones the wet run has to fill in.
terminal_ts = []
out_type.for_each_terminal(lambda t, _: terminal_ts.append(t), dry_result)
tensor_ts = [t for t in terminal_ts if isinstance(t, tdt.TensorType)]
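    # terminal_ts is collected in the same order that flatten() produces,
    # which is what lets us zip it against flattened results below.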
# If there are no tensor types or metrics then we can't do a wet
# run. Fortunately we don't need to.
if not (tensor_ts or self._ctx.metric_ops): return dry_result
    # Now we can do a wet run to get the actual loom results. We need to
    # create a pass-through (tagging) op that takes all tensor types in
    # out_type as inputs, to ensure that loom recognizes their type-shapes.
self._init_loom(tensor_ts, max_depth)
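    # A weaver schedules loom ops; evaluating the root block against it
    # records the wiring needed to compute the wet results.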
weaver = self._wet.make_weaver()
wet_result = self.root._evaluate( # pylint: disable=protected-access
self._eval_ctx(weaver), inp)
    # We need to flatten 'wet_result' so we can use it with the pass-through
    # (tagging) op.
tensor_results = [
r for r, t in zip(out_type.flatten(wet_result), terminal_ts)
if isinstance(t, tdt.TensorType)]
if tensor_results: # only create the op if we have tensor outputs
tensor_results = weaver.op(self._wet.tagging_op_name, tensor_results)
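    # build_feed_dict serializes the scheduled loom ops into the feed for the
    # loom's input tensor; any user-supplied feeds are merged on top of it.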
fd = weaver.build_feed_dict(tensor_results)
if feed_dict is not None: fd.update(feed_dict)
# Now we can evaluate the loom results, assuming we have a session.
if session is None:
session = tf.get_default_session()
if session is None:
raise ValueError('No default session is registered. Use `with '
'sess.as_default()` or pass a session to eval()')
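    # Initialize any variables that have not yet been initialized in this
    # session.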
_init_uninitialized(session)
tensor_outputs_and_metrics = session.run(
self.output_tensors + list(self.metric_tensors.values()), feed_dict=fd)
# Handle the tensor outputs.
tensor_outputs = tensor_outputs_and_metrics[:len(self.output_tensors)]
    # Drop the leading batch dimension, which has size 1 since we evaluated
    # a single input.
tensor_outputs = (np.squeeze(array, 0) for array in tensor_outputs)
# If requested, convert arrays to lists.
if tolist: tensor_outputs = (array.tolist() for array in tensor_outputs)
    # Merge the tensor outputs with the non-tensor outputs taken from the
    # dry run.
outputs = (next(tensor_outputs) if isinstance(t, tdt.TensorType) else dry
for t, dry in zip(terminal_ts, out_type.flatten(dry_result)))
    # Unflatten to rebuild the nested structure described by out_type.
result = out_type.unflatten(outputs, dry_result)
# Handle metrics.
metrics = tensor_outputs_and_metrics[len(self.output_tensors):]
metrics = {name: value.tolist() if tolist else value
for name, value in zip(self.metric_tensors, metrics)}
return (result, metrics) if metrics else result
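
  # A minimal usage sketch (illustrative only, not part of this module): the
  # public td.Block.eval() wrapper ultimately delegates to _eval() above. The
  # block below is an arbitrary example and assumes TensorFlow Fold's TF 1.x
  # API, with a default session installed as _eval() requires:
  #
  #   import tensorflow as tf
  #   import tensorflow_fold as td
  #
  #   block = td.Map(td.Scalar() >> td.Function(tf.negative))
  #   with tf.Session():
  #     print(block.eval([1.0, 2.0, 3.0], tolist=True))  # [-1.0, -2.0, -3.0]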