def from_tensorflow()

in nnvm/python/nnvm/frontend/tensorflow.py


    def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
        """Construct nnvm nodes from tensorflow graph definition - GraphDef.

        Follow the tensorflow graph definition to parse and convert it to NNVM.
        Some of the assumptions are listed below.

            -> All Placeholders/PlaceholderWithDefaults are considered graph inputs.
            -> All Const nodes are params.
            -> The last node is assumed to be the graph output.
            -> _output_shapes : The graph should be frozen with add_shapes=True,
                                or the user can optionally pass an input shape dictionary.
            -> DecodeJpeg, ResizeBilinear: These are dummy operators, so the user
                                           should handle preprocessing outside.
            -> CheckNumerics: Not implemented yet; the input is simply
                              copied to the output.

        Parameters
        ----------
        graph : tensorflow graph definition object
            The loaded tensorflow GraphDef

        layout : target layout to be used (Optional)
            Only "NCHW" is currently supported, to enable NHWC models on GPU.

        shape : Dictionary of input dimensions (Optional)
            Graph level input shape dictionary.

        outputs : List of output tensor names (Optional)
            If not specified, the last node is assumed to be the graph output.

        Returns
        -------
        sym : nnvm.sym.Symbol
            The returned nnvm symbol
        params : dict
            A dict of name: tvm.nd.array pairs, used as pretrained weights
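
        Examples
        --------
        Typically invoked through the module-level helper; graph_def and
        shape_dict stand in for the loaded GraphDef and an optional input
        shape dictionary::

            sym, params = nnvm.frontend.from_tensorflow(graph_def, shape=shape_dict)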
        """

        try:
            from tensorflow.python.framework import tensor_util
        except ImportError as e:
            raise ImportError(
                "Unable to import tensorflow which is required {}".format(e))

        missing_operators = self._parse_import_prerequisites(graph)

        if missing_operators:
            msg = 'The following operators are not supported in frontend TensorFlow: {}'
            ops = str(list(missing_operators)).strip('[,]')
            raise tvm.error.OpNotImplemented(msg.format(ops))

        for node in graph.node:
            if node.op in ('Placeholder', 'PlaceholderWithDefault'):
                # Give priority to the user-provided shape.
                if shape and node.name in shape:
                    self._input_shapes[node.name] = list(shape[node.name])
                else:
                    self._input_shapes[node.name] = \
                        tensor_util.TensorShapeProtoToList(node.attr['shape'].shape)
                    for idx, dim in enumerate(self._input_shapes[node.name]):
                        if dim < 0:
                            self._input_shapes[node.name][idx] = 1
                            warnings.warn("Use 1 instead of -1 in shape of operator %s."
                                          % node.name)

                self._nodes[node.name] = _sym.Variable(name=node.name,
                                                       shape=self._input_shapes[node.name])
                self._output_shapes[node.name] = [self._input_shapes[node.name]]
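                # Record which outputs are 0-d (scalar) in TF: an empty shape
                # list marks a scalar, which NNVM represents as 1-d
                # (see `_expand_dims_0d_aware`).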
                self._outputs_are_0d[node.name] = [ \
                    not tshape if isinstance(tshape, list) else False \
                    for tshape in self._output_shapes[node.name]]

            # Ignore the user's input shape for non-Placeholder nodes
            elif node.op == 'Const':
                tensor_value = node.attr['value'].tensor
                self._input_shapes[node.name] = \
                    tensor_util.TensorShapeProtoToList(tensor_value.tensor_shape)
                if shape and node.name in shape:
                    warnings.warn("Ignore the passed shape. "
                                  "Shape in graphdef will be used for operator %s." % node.name)

        final_op = None
        # Parse the nodes to re-create TF graph using Symbol API of NNVM
        for node in graph.node:
            # TensorFlow doesn't have a separate list for params extraction.
            # Nodes with op 'Const' are treated as parameters to build the NNVM params dict.

            input_shapes = {}
            input_0d_mismatch = set()
            attr = self._parse_attr(node.attr)

            # A Variable converted to Const may not carry a 'value' attr,
            # so check for it explicitly.
            if 'value' in attr and node.op == 'Const':
                self._output_shapes[node.name] = [self._input_shapes[node.name]]
            elif shape and node.name in shape:
                # Give priority to user argument.
                self._output_shapes[node.name] = [shape[node.name]]
            elif node.op in ('Placeholder', 'PlaceholderWithDefault'):
                self._output_shapes[node.name] = [self._input_shapes[node.name]]
            elif '_output_shapes' in attr:
                self._output_shapes[node.name] = \
                    [tensor_util.TensorShapeProtoToList(tshape) \
                    for tshape in attr['_output_shapes']]
            else:
                # Keep the list indexable to avoid key error.
                # Actual value will be filled after node creation.
                # Will infer shapes if the graph is not frozen with add_shapes=True
                self._output_shapes[node.name] = [None]

            self._outputs_are_0d[node.name] = [ \
                not tshape if isinstance(tshape, list) else False \
                for tshape in self._output_shapes[node.name]]

            if node.op == "Placeholder" or node.op == 'PlaceholderWithDefault':
                self._nodes[node.name] = _sym.Variable(name=node.name,
                                                       shape=self._input_shapes[node.name])

            elif node.op == "Const":
                # All Const nodes are Param nodes; let's parse them.
                self._num_param += 1
                for key, value in node.attr.items():
                    self._parse_param(key, value, node.name)
                if node.name not in self._nodes:
                    raise NotImplementedError( \
                        "Const {} couldn't be converted to Param.".format(node.name))

                attr = self._parse_attr(node.attr)

            elif node.op != "Placeholder":
                # Pass the parsed shapes instead
                attr["_output_shapes"] = output_shapes = self._output_shapes[node.name]

                # Pass the node name too in attr
                attr["_node_name"] = node.name

                # Pass the target layout
                attr["_target_layout"] = layout

                # Fill shapes for all inputs in a list
                inputs = []
                for i in node.input:
                    # Some TensorFlow operators internally maintain execution layers
                    # and their output name includes the layer number along with
                    # graph node name. E.g. the node name is 'Model/RNN/cell_0/RnnCell', but the
                    # output tensor name is 'Model/RNN/cell_0/RnnCell:0'. In this case,
                    # the number has to be ignored for single-output nodes.
                    # On the other hand, for multi-output nodes the number is the output index,
                    # and the lack of the number implies 0.
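                    # E.g. for a multi-output node, 'Split:1' selects output
                    # slot 1 of node 'Split', while a bare 'Split' means slot 0.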
                    tensor_name = i.split(':')
                    node_name = tensor_name[0]
                    if node_name in self._nodes:
                        in_sym = self._nodes[node_name]
                        if len(in_sym.list_output_names()) > 1:
                            tensor_slot = int(tensor_name[1]) if len(tensor_name) > 1 else 0
                            in_sym = in_sym[tensor_slot]
                            input_shape = self._output_shapes[node_name][tensor_slot]
                        else:
                            tensor_slot = 0
                            input_shape = self._output_shapes[node_name][0]
                        inputs.append(in_sym)
                        input_shapes[in_sym] = input_shape
                        # This means the node is 1d in NNVM and 0d in TF.
                        # See `_expand_dims_0d_aware`.
                        if self._outputs_are_0d[node_name][tensor_slot] and input_shape:
                            input_0d_mismatch.add(in_sym)
                attr['_input_shapes'] = input_shapes
                attr['_input_0d_mismatch'] = input_0d_mismatch

                inputs = self._fix_extranodes(node.op, attr, inputs)
                op = self._convert_operator(node.op, inputs, attr, graph)

                # Check if op is converted to param
                if isinstance(op, np.ndarray):
                    self._params[node.name] = tvm.nd.array(op)
                    op = _sym.Variable(name=node.name,
                                       shape=self._params[node.name].shape)

                # Assuming only one output.
                self._nodes[node.name] = op
                final_op = op

                # Infer shapes even without specifying "add_shapes=True"
                if output_shapes == [None]:
                    g = _graph.create(final_op)
                    self._output_shapes[node.name] = \
                        list(graph_util.infer_shape(g, **self._input_shapes))[-1]

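                # Sanity check: the inferred shape must agree with any
                # user-provided shape for this node.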
                if self._output_shapes[node.name] and shape and node.name in shape:
                    assert self._output_shapes[node.name] == list(shape[node.name])

            # Infer shapes if input shapes were passed explicitly
            node_output = self._nodes[node.name]
            if shape and (not self._output_shapes[node.name][0]
                          or -1 in self._output_shapes[node.name][0]):
                g = _graph.create(node_output)
                shape_dict = {k: v.shape for k, v in self._params.items()}
                shape_dict.update(shape)
                _, out_shapes = graph_util.infer_shape(g, **shape_dict)
                self._output_shapes[node.name] = out_shapes

        out = []
        if outputs is None:
            out.append(final_op)
        else:
            for out_name in outputs:
                if ":" in out_name:
                    out_name, out_num = out_name.split(":")
                    out_num = int(out_num)
                    out.append(self._nodes[out_name][out_num])
                else:
                    out.append(self._nodes[out_name])

        # Also add the RNN outputs to the 'head' nodes of the nnvm graph
        if self._num_rnn_layer:
            out_rnn = _sym.concatenate(*self._out_rnn, axis=0)
            out.append(out_rnn)

        if isinstance(out, list):
            out = _sym.Group(out) if len(out) > 1 else out[0]

        return out, self._params
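
For context, here is a hedged end-to-end sketch of how this conversion is
typically driven (TensorFlow 1.x era APIs; the protobuf path, input name,
and target below are illustrative assumptions, not fixed by this code):

    import nnvm
    import nnvm.compiler
    import tensorflow as tf

    # Load a frozen GraphDef, ideally frozen with add_shapes=True.
    graph_def = tf.GraphDef()
    with tf.gfile.GFile("frozen_model.pb", "rb") as f:      # hypothetical path
        graph_def.ParseFromString(f.read())

    # Convert to NNVM; the shape dict overrides Placeholder shapes.
    input_shapes = {"input": (1, 224, 224, 3)}              # hypothetical input name
    sym, params = nnvm.frontend.from_tensorflow(graph_def, shape=input_shapes)

    # Compile the resulting symbol and pretrained params for a target.
    graph, lib, params = nnvm.compiler.build(
        sym, target="llvm", shape=input_shapes, params=params)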