in onnxconverter_common/onnx_fx.py [0:0]
def apply_invoke_inline(self, ox_graph, input_map, output_map):
    """Inline every node, initializer, and value_info of `ox_graph` into this container.

    Args:
        ox_graph: ONNX GraphProto to expand in place into the current graph.
        input_map: dict of [name in ox_graph] -> actual input Tensor.
        output_map: dict of [name in ox_graph] -> desired name for the result, or None.

    Returns:
        The tensor(s) for the invocation's outputs, via self._output_names_to_tensors:
        a single item when the graph has exactly one output, otherwise a list.
    """
    f_name = "invoke_inline_" + ox_graph.name
    # Resolve each requested output into its concrete name in this graph.
    output_map = {graph_output: self._process_outputs(desired, name=f_name)[0]
                  for graph_output, desired in output_map.items()}
    outputs = list(output_map.values())  # remember these; these are the outputs of this invocation
    if len(outputs) == 1:
        outputs = outputs[0]  # single output
    # Resolve each actual input tensor into its concrete name in this graph.
    input_map = {graph_input: self._process_inputs([tensor.name], name=f_name)[0]
                 for graph_input, tensor in input_map.items()}
    _logger.debug("%s %s %s", f_name, input_map, output_map)
    existing_node_names = {item.name: item for item in self._container.nodes}
    existing_initializer_names = {item.name: item for item in self._container.initializers}
    existing_value_infos = {item.name: item for item in self._container.value_info}
    # Collect all outputs from the graph we are expanding, so that we can map them to unique names.
    # @TODO: This will also map some code that may be shared later on. Leave that to the optimizer.
    node_map = dict()
    for node in ox_graph.node:
        if not node.input:  # leaves do not need to be mapped; they can just get uniq'ed
            continue
        for output in node.output:
            if output in output_map:  # this is an actual output that already has been mapped
                continue
            uniq_name = onnx_ops._create_name_or_use_existing_one(
                self._scope, self._generate_name(output, None), None)
            output_map[output] = uniq_name
            uniq_node_name = onnx_ops._create_name_or_use_existing_one(
                self._scope, self._generate_name(node.name, None), None)
            # NOTE(review): this map is keyed by the *output* name, but the lookup
            # below uses node.name — it only hits when a node is named after one of
            # its outputs. Looks like it may have meant node_map[node.name]; confirm.
            node_map[output] = uniq_node_name

    def map_tensors(args, arg_map):
        # Rewrite, in place, every entry of `args` that has a mapping in `arg_map`.
        for i, arg in enumerate(args):
            if arg in arg_map:
                _logger.debug("Remapping %s to %s", arg, arg_map[arg])
                args[i] = arg_map[arg]

    for node in ox_graph.node:
        node = copy.deepcopy(node)  # since we patch, we must clone it first
        map_tensors(node.input, input_map)    # patch the input references to the function arguments
        map_tensors(node.output, output_map)  # rename the outputs to unique ones
        map_tensors(node.input, output_map)   # outputs may be inputs to other nodes in this graph
        if node.name in node_map:
            node.name = node_map[node.name]
        if node.name in existing_node_names:
            str_node = str(node)
            str_other = str(existing_node_names[node.name])
            if str_node != str_other:
                # must be the same, otherwise we have inconsistent dups, e.g. in input models
                _logger.info("Duplicate node name with inconsistent nodes:\n%s\nvs:\n%s",
                             node, existing_node_names[node.name])
            assert str_node == str_other
            continue
        self._container.nodes.append(node)
    for initializer in ox_graph.initializer:
        if initializer.name in existing_initializer_names:  # @TODO: check if they are the same
            _logger.info("Duplicate initializer name skipped: %s", initializer.name)
            continue
        if initializer.name in output_map:  # technically, the whole function could be a lonely initializer
            # Clone before renaming so the source graph's proto stays untouched.
            initializer = copy.deepcopy(initializer)
            initializer.name = output_map[initializer.name]
        self._container.initializers.append(initializer)
    for value_info in ox_graph.value_info:
        if value_info.name in existing_value_infos:  # @TODO: check if they are the same
            _logger.info("Duplicate value_info name skipped: %s", value_info.name)
            continue
        # @TODO: Not sure what must be mapped, and how
        _logger.debug("%s", value_info)
        self._container.value_info.append(value_info)
    return self._output_names_to_tensors(outputs)  # note: outputs is either a string or a list of strings