tinynn/converter/operators/torch/aten.py [2859:2897]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def parse(self, node, attrs, args, graph_converter):
        """Lower a variance-style reduction into TFLite ops.

        Builds MEAN -> SQUARED_DIFFERENCE -> SUM, applying Bessel's
        correction to the sample count when ``unbiased``/``correction``
        request it.

        NOTE(review): only the head of this method is visible in this chunk;
        the tail (presumably the division by the sample count and the graph
        registration of ``ops``) lies past the end of the excerpt.
        """
        super().parse(node, attrs, args, graph_converter)

        # Execute the node so input/output tensors are populated.
        self.run(node)
        input_dims = self.input_tensors[0].dim()

        # Resolve optional schema arguments, falling back to PyTorch-style
        # defaults: reduce over all dims, drop reduced dims, unbiased
        # estimate with correction = 1.
        dims = self.input_tensors[args['dim']] if 'dim' in args else list(range(input_dims))
        keep_dims = self.input_tensors[args['keepdim']] if 'keepdim' in args else False
        unbiased = self.input_tensors[args['unbiased']] if 'unbiased' in args else True
        correction = self.input_tensors[args['correction']] if 'correction' in args else 1

        # Normalize negative axes to their non-negative equivalents.
        # NOTE(review): this mutates `dims` in place; when 'dim' is present,
        # `dims` aliases the entry in self.input_tensors — confirm that
        # mutating the shared list is intended.
        for i in range(len(dims)):
            if dims[i] < 0:
                dims[i] += input_dims

        input_tensor = self.find_or_create_input(0, graph_converter)
        output_tensor = self.to_tfl_tensors(self.output_names, self.output_tensors)[0]

        ops = []

        # Number of elements being reduced over; subtract the correction
        # term (Bessel's correction) for an unbiased estimate.
        sample_dims = [input_tensor.shape[i] for i in range(input_dims) if i in dims]
        samples = np.prod(sample_dims, dtype='float32')
        if unbiased and correction != 0:
            samples -= correction
        samples = samples.astype('float32')
        samples_tensor = self.create_attr_tensor(samples)

        # mean = MEAN(x) with keepDims=True so it broadcasts against `x`
        # in the SQUARED_DIFFERENCE op below.
        dims_tensor = self.create_attr_tensor(np.array(dims, dtype='int32'))
        mean_tensor = self.create_transform_tensor(np.mean(input_tensor.tensor, axis=tuple(dims), keepdims=True))
        ops.append(tfl.MeanOperator([input_tensor, dims_tensor], [mean_tensor], keepDims=True))

        # (x - mean)^2 as a single SQUARED_DIFFERENCE op.
        squared_diff = self.create_transform_tensor(np.power(input_tensor.tensor - mean_tensor.tensor, 2))
        ops.append(tfl.SquaredDifferenceOperator([input_tensor, mean_tensor], [squared_diff]))

        # Unbiased path: SUM the squared differences; the division by the
        # corrected sample count presumably happens past this excerpt.
        if unbiased and correction != 0:
            squared_diff_sum = self.create_transform_tensor(
                np.sum(squared_diff.tensor, axis=tuple(dims), keepdims=keep_dims)
            )
            ops.append(tfl.SumOperator([squared_diff, dims_tensor], [squared_diff_sum], keepDims=keep_dims))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



tinynn/converter/operators/torch/aten.py [2907:2945]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def parse(self, node, attrs, args, graph_converter):
        """Lower a variance-style reduction into TFLite ops.

        Builds MEAN -> SQUARED_DIFFERENCE -> SUM, applying Bessel's
        correction to the sample count when ``unbiased``/``correction``
        request it.

        NOTE(review): only the head of this method is visible in this chunk;
        the tail (presumably the division by the sample count and the graph
        registration of ``ops``) lies past the end of the excerpt. The
        visible portion is identical to the sibling parse at [2859:2897] —
        consider extracting a shared helper once both tails are in view.
        """
        super().parse(node, attrs, args, graph_converter)

        # Execute the node so input/output tensors are populated.
        self.run(node)
        input_dims = self.input_tensors[0].dim()

        # Resolve optional schema arguments, falling back to PyTorch-style
        # defaults: reduce over all dims, drop reduced dims, unbiased
        # estimate with correction = 1.
        dims = self.input_tensors[args['dim']] if 'dim' in args else list(range(input_dims))
        keep_dims = self.input_tensors[args['keepdim']] if 'keepdim' in args else False
        unbiased = self.input_tensors[args['unbiased']] if 'unbiased' in args else True
        correction = self.input_tensors[args['correction']] if 'correction' in args else 1

        # Normalize negative axes to their non-negative equivalents.
        # NOTE(review): this mutates `dims` in place; when 'dim' is present,
        # `dims` aliases the entry in self.input_tensors — confirm that
        # mutating the shared list is intended.
        for i in range(len(dims)):
            if dims[i] < 0:
                dims[i] += input_dims

        input_tensor = self.find_or_create_input(0, graph_converter)
        output_tensor = self.to_tfl_tensors(self.output_names, self.output_tensors)[0]

        ops = []

        # Number of elements being reduced over; subtract the correction
        # term (Bessel's correction) for an unbiased estimate.
        sample_dims = [input_tensor.shape[i] for i in range(input_dims) if i in dims]
        samples = np.prod(sample_dims, dtype='float32')
        if unbiased and correction != 0:
            samples -= correction
        samples = samples.astype('float32')
        samples_tensor = self.create_attr_tensor(samples)

        # mean = MEAN(x) with keepDims=True so it broadcasts against `x`
        # in the SQUARED_DIFFERENCE op below.
        dims_tensor = self.create_attr_tensor(np.array(dims, dtype='int32'))
        mean_tensor = self.create_transform_tensor(np.mean(input_tensor.tensor, axis=tuple(dims), keepdims=True))
        ops.append(tfl.MeanOperator([input_tensor, dims_tensor], [mean_tensor], keepDims=True))

        # (x - mean)^2 as a single SQUARED_DIFFERENCE op.
        squared_diff = self.create_transform_tensor(np.power(input_tensor.tensor - mean_tensor.tensor, 2))
        ops.append(tfl.SquaredDifferenceOperator([input_tensor, mean_tensor], [squared_diff]))

        # Unbiased path: SUM the squared differences; the division by the
        # corrected sample count presumably happens past this excerpt.
        if unbiased and correction != 0:
            squared_diff_sum = self.create_transform_tensor(
                np.sum(squared_diff.tensor, axis=tuple(dims), keepdims=keep_dims)
            )
            ops.append(tfl.SumOperator([squared_diff, dims_tensor], [squared_diff_sum], keepDims=keep_dims))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



