in tfx_bsl/beam/run_inference.py [0:0]
def _make_io_tensor_spec(self) -> _IOTensorSpec:
  # The pre-process function validates each signature individually.
  io_tensor_specs = []
  for signature in self._signatures:
    if len(signature.signature_def.inputs) != 1:
      raise ValueError('Signature should have one and only one input.')
    if (list(signature.signature_def.inputs.values())[0].dtype !=
        tf.string.as_datatype_enum):
      raise ValueError(
          'Input dtype is expected to be %s, got %s' %
          (tf.string.as_datatype_enum,
           list(signature.signature_def.inputs.values())[0].dtype))
    io_tensor_specs.append(_signature_pre_process(signature.signature_def))
  # Merge the per-signature specs: all signatures must feed the same input
  # tensor, while their output alias -> tensor-name maps are combined.
  input_tensor_name = ''
  input_tensor_alias = ''
  output_alias_tensor_names = {}
  for io_tensor_spec in io_tensor_specs:
    if not input_tensor_name:
      input_tensor_name = io_tensor_spec.input_tensor_name
      input_tensor_alias = io_tensor_spec.input_tensor_alias
    elif input_tensor_name != io_tensor_spec.input_tensor_name:
      raise ValueError('Input tensor must be the same for all Signatures.')
    for alias, tensor_name in io_tensor_spec.output_alias_tensor_names.items():
      output_alias_tensor_names[alias] = tensor_name
  if (not output_alias_tensor_names or not input_tensor_name or
      not input_tensor_alias):
    raise ValueError('No valid fetch tensors or feed tensors.')
  return _IOTensorSpec(input_tensor_alias, input_tensor_name,
                       output_alias_tensor_names)
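
For context, a minimal self-contained sketch of the merge step is shown below. The `_IOTensorSpec` namedtuple fields mirror the constructor call above, but the standalone `merge_io_tensor_specs` helper and the example tensor names are illustrative assumptions, not part of tfx_bsl.

# Illustrative sketch only: replays the merge logic above on plain Python
# objects, with no TensorFlow or Beam dependencies. The helper name and the
# example aliases/tensor names are hypothetical.
import collections

_IOTensorSpec = collections.namedtuple(
    '_IOTensorSpec',
    ['input_tensor_alias', 'input_tensor_name', 'output_alias_tensor_names'])


def merge_io_tensor_specs(io_tensor_specs):
  """Merges per-signature specs that share a single feed tensor."""
  input_tensor_name = ''
  input_tensor_alias = ''
  output_alias_tensor_names = {}
  for spec in io_tensor_specs:
    if not input_tensor_name:
      input_tensor_name = spec.input_tensor_name
      input_tensor_alias = spec.input_tensor_alias
    elif input_tensor_name != spec.input_tensor_name:
      raise ValueError('Input tensor must be the same for all Signatures.')
    output_alias_tensor_names.update(spec.output_alias_tensor_names)
  if not (output_alias_tensor_names and input_tensor_name and
          input_tensor_alias):
    raise ValueError('No valid fetch tensors or feed tensors.')
  return _IOTensorSpec(input_tensor_alias, input_tensor_name,
                       output_alias_tensor_names)


# Two signatures feeding the same serialized-example tensor but fetching
# different outputs collapse into one feed and a combined fetch map.
merged = merge_io_tensor_specs([
    _IOTensorSpec('examples', 'input_example_tensor:0',
                  {'scores': 'head/probabilities:0'}),
    _IOTensorSpec('examples', 'input_example_tensor:0',
                  {'classes': 'head/classes:0'}),
])
assert merged.input_tensor_name == 'input_example_tensor:0'
assert merged.output_alias_tensor_names == {
    'scores': 'head/probabilities:0',
    'classes': 'head/classes:0',
}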