def parse()

in tinynn/converter/operators/torch/aten.py [0:0]

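Parses a PyTorch slice node (input, dim, start, end, step) and lowers it to a TFLite SLICE or STRIDED_SLICE operator. Static bounds are folded into attribute tensors, while bounds that come in as graph constants are reshaped, cast to int32 and concatenated into the begin/end vectors inside the converted graph.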

    def parse(self, node, attrs, args, graph_converter):
        super().parse(node, attrs, args, graph_converter)

        self.run(node)
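        # aten::slice signature: (input, dim, start, end, step); tensor 0 is the
        # sliced input, the remaining input tensors carry the slice parameters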
        input_tensor = self.find_or_create_input(0, graph_converter)
        dim, start, end, step = self.input_tensors[1:]

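        # Fill in default bounds and wrap negative start/end/dim against the input shape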
        if start is None:
            start = 0

        if end is None:
            end = input_tensor.tensor.shape[dim]

        if start < 0:
            start += input_tensor.tensor.shape[dim]

        if end < 0:
            end += input_tensor.tensor.shape[dim]

        if dim < 0:
            dim += input_tensor.tensor.ndim

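        # Clamp the range: an empty slice when start > end, and cap end at the dimension size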
        if start > end:
            end = start

        if end >= input_tensor.tensor.shape[dim]:
            end = input_tensor.tensor.shape[dim]

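        # Per-dimension begin indices: zeros everywhere except the sliced dimension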
        starts = np.zeros(input_tensor.tensor.ndim, dtype='int32')
        starts[dim] = start

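        # When the start value is tracked as a graph constant rather than a folded Python
        # int, reshape it to a 1-element vector, cast it to int32 and concatenate it with
        # the static parts to form the begin tensor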
        if self.input_names[2] in graph_converter.constant_mapping:
            start_t = graph_converter.constant_mapping[self.input_names[2]]
            new_shape_arr = np.array((1,), dtype='int32')
            new_shape_tensor = self.create_attr_tensor(new_shape_arr)
            start_reshaped = self.create_transform_tensor(np.reshape(start_t.tensor, new_shape_arr))
            graph_converter.add_operator(
                tfl.ReshapeOperator([start_t, new_shape_tensor], [start_reshaped], new_shape_arr)
            )

            start_casted = self.create_transform_tensor(start_reshaped.tensor.astype('int32'))
            graph_converter.add_operator(
                tfl.CastOperator(
                    [start_reshaped],
                    [start_casted],
                    tfl.numpy_tflite_dtype_mappings[str(start_reshaped.dtype)],
                    tfl.numpy_tflite_dtype_mappings[str(start_casted.dtype)],
                )
            )

            start_tensor = self.create_transform_tensor(starts)
            starts_left = starts[:dim]
            starts_right = starts[dim + 1 :]
            starts_tensors = []
            if len(starts_left) > 0:
                starts_tensors.append(self.create_attr_tensor(starts_left))
            starts_tensors.append(start_casted)
            if len(starts_right) > 0:
                starts_tensors.append(self.create_attr_tensor(starts_right))
            if len(starts_tensors) > 1:
                graph_converter.add_operator(tfl.ConcatenationOperator(starts_tensors, [start_tensor], 0))
            else:
                start_tensor = starts_tensors[0]
        else:
            start_tensor = self.create_attr_tensor(starts)

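        # Per-dimension end indices. When a plain SLICE can be emitted below (unit step,
        # both bounds static), TFLite expects sizes rather than end indices, hence end - start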
        ends = np.array(input_tensor.tensor.shape, dtype='int32')
        if step != 1 or start_tensor.buffer is None or self.input_names[3] in graph_converter.constant_mapping:
            ends[dim] = end
        else:
            ends[dim] = end - start

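        # Dynamic end values get the same reshape / cast-to-int32 / concatenation treatment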
        if self.input_names[3] in graph_converter.constant_mapping:
            end_t = graph_converter.constant_mapping[self.input_names[3]]
            new_shape_arr = np.array((1,), dtype='int32')
            new_shape_tensor = self.create_attr_tensor(new_shape_arr)
            end_reshaped = self.create_transform_tensor(np.reshape(end_t.tensor, new_shape_arr))
            graph_converter.add_operator(tfl.ReshapeOperator([end_t, new_shape_tensor], [end_reshaped], new_shape_arr))

            end_casted = self.create_transform_tensor(end_reshaped.tensor.astype('int32'))
            graph_converter.add_operator(
                tfl.CastOperator(
                    [end_reshaped],
                    [end_casted],
                    tfl.numpy_tflite_dtype_mappings[str(end_reshaped.dtype)],
                    tfl.numpy_tflite_dtype_mappings[str(end_casted.dtype)],
                )
            )

            end_tensor = self.create_transform_tensor(ends)
            ends_left = ends[:dim]
            ends_right = ends[dim + 1 :]
            ends_tensors = []
            if len(ends_left) > 0:
                ends_tensors.append(self.create_attr_tensor(ends_left))
            ends_tensors.append(end_casted)
            if len(ends_right) > 0:
                ends_tensors.append(self.create_attr_tensor(ends_right))
            if len(ends_tensors) > 1:
                graph_converter.add_operator(tfl.ConcatenationOperator(ends_tensors, [end_tensor], 0))
            else:
                end_tensor = ends_tensors[0]
        else:
            end_tensor = self.create_attr_tensor(ends)

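        # A non-unit step or a dynamic bound requires STRIDED_SLICE; otherwise a plain
        # SLICE with (begin, size) is enough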
        if step != 1 or start_tensor.buffer is None or end_tensor.buffer is None:
            strides = np.ones(input_tensor.tensor.ndim, dtype='int32')
            strides[dim] = step

            stride_tensor = self.create_attr_tensor(strides)

            inputs = [input_tensor, start_tensor, end_tensor, stride_tensor]
            outputs = self.to_tfl_tensors(self.output_names, self.output_tensors)

            graph_converter.add_operator(tfl.StridedSliceOperator(inputs, outputs))
        else:
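            # end_tensor already holds sizes here (end - start along the sliced dimension),
            # so it can be passed to SLICE directly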
            size_tensor = end_tensor
            inputs = [input_tensor, start_tensor, size_tensor]
            outputs = self.to_tfl_tensors(self.output_names, self.output_tensors)

            graph_converter.add_operator(tfl.SliceOperator(inputs, outputs))
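
The final branch can be sanity-checked outside the converter. The sketch below (plain NumPy, not part of tinynn; all names are illustrative) shows why a unit-step slice with static bounds reduces to SLICE's (begin, size) form, while anything else needs STRIDED_SLICE's (begin, end, strides):

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)

# PyTorch x[:, 1:3]  ->  dim=1, start=1, end=3, step=1
begin = np.array([0, 1, 0], dtype=np.int32)
end = np.array([2, 3, 4], dtype=np.int32)

# Unit step with static bounds: SLICE takes per-dimension sizes, i.e. end - begin
size = end - begin
sliced = x[tuple(slice(b, b + s) for b, s in zip(begin, size))]
assert sliced.shape == (2, 2, 4)

# A non-unit step (e.g. x[:, 1:3:2]) needs STRIDED_SLICE, which keeps
# (begin, end, strides) as separate vectors
strides = np.array([1, 2, 1], dtype=np.int32)
strided = x[tuple(slice(b, e, s) for b, e, s in zip(begin, end, strides))]
assert strided.shape == (2, 1, 4)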