in tinynn/converter/operators/torch/base.py [0:0]
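# NB: this helper is a method on the shared torch-op base class; it relies on
# module-level names that base.py is assumed to provide already (`inspect`,
# `numpy as np`, the `tfl` operator wrappers, and the `log` logger).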
def handle_reduce(self, converter_class, input_args, graph_converter, transpose_opt, *args, **kwargs):
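    """Lower a PyTorch reduce op (e.g. mean/sum/min) to a TFLite reduce operator.

    Normalizes `dim`/`keepdim`, emulates `keepdim` for converter classes without
    native support, and optionally rewrites 2D spatial reductions into NHWC layout.
    """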
    input_tensor = self.find_or_create_input(0, graph_converter)
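
    # Normalize the reduction spec: a scalar `dim` becomes a one-element list,
    # and a missing or empty `dim` means a full reduction over all axes, whose
    # scalar output is viewed as a 1-element tensor.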
    if 'dim' in input_args and 'keepdim' in input_args:
        dims, keep_dim = self.input_tensors[1:3]
        if not isinstance(dims, (list, tuple)):
            dims = [dims]
        if len(dims) == 0:
            dims = list(range(input_tensor.tensor.ndim))
            self.output_tensors[0] = self.output_tensors[0].view(1)
    else:
        dims = list(range(input_tensor.tensor.ndim))
        keep_dim = False
        self.output_tensors[0] = self.output_tensors[0].view(1)
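
    # Wrap negative axes into the [0, ndim) range expected downstream.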
    for idx, dim in enumerate(dims):
        if dim < 0:
            dims[idx] += input_tensor.tensor.ndim
    ops = []
    transpose = False
    if transpose_opt:
        # For some ops, the TFLite codepath is optimized for NHWC. For example,
        # if a `tfl.Mean` is effectively a 2D pooling op, it is worth wrapping
        # it with transposes so that it runs in NHWC.
        if len(input_tensor.shape) == 4 and keep_dim in (1, True):
            if dims == [2, 3]:
                dims = [1, 2]
                transpose = True
            elif dims == [3, 2]:
                dims = [2, 1]
                transpose = True
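
    # TFLite reduce ops take their reduction axes as a constant int32 tensor input.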
    dim_tensor = self.create_attr_tensor(np.array(dims, dtype='int32'))

    inputs = [input_tensor, dim_tensor]
    outputs = self.to_tfl_tensors(self.output_names, self.output_tensors)
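
    # Multi-output reduce ops (e.g. `torch.min(t, dim)` returns values and
    # indices) map to a single-output TFLite op, so only the first output is kept.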
    if len(outputs) > 1:
        log.warning(
            'Reduce ops like `torch.min` have multiple outputs. However, only the first '
            'output will be preserved in our converter. If you need the indices tensor, '
            'please use `torch.argmin` instead.'
        )
        outputs = outputs[:1]
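
    # Converter classes whose constructor accepts `keepDims` support `keepdim`
    # natively. For the rest, the reduce always squeezes the reduced axes, so
    # `keepdim=True` is emulated with a trailing reshape back to the kept shape.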
    if (
        hasattr(converter_class, '__init__')
        and 'keepDims' in inspect.signature(converter_class.__init__).parameters
    ):
        ops.append(converter_class(inputs, outputs, keep_dim, *args, **kwargs))
    elif keep_dim:
        output_tensor = outputs[0]
        transform = self.create_transform_tensor(np.squeeze(output_tensor.tensor, tuple(dims)))
        ops.append(converter_class(inputs, [transform], *args, **kwargs))
        shape_tensor = self.create_attr_tensor(np.array(output_tensor.shape, dtype='int32'))
        ops.append(tfl.ReshapeOperator([transform, shape_tensor], [output_tensor], shape_tensor.tensor))
    else:
        ops.append(converter_class(inputs, outputs, *args, **kwargs))
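
    # NHWC rewrite: with `keepdim` the tensor stays 4D, so the ops can be
    # sandwiched between NCHW->NHWC and NHWC->NCHW transposes; without
    # `keepdim` the output rank drops, so only the input-side transpose is needed.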
    if transpose:
        if keep_dim:
            ops = self.wrap_ops_with_nhwc_nchw_transposes(ops)
        else:
            orig_input = ops[0].inputs[0]
            nchw2nhwc_perm = np.array([0, 2, 3, 1], dtype='int32')
            nchw2nhwc_perm_tensor = self.create_attr_tensor(nchw2nhwc_perm)
            new_input = self.create_transform_tensor(
                np.transpose(orig_input.tensor, nchw2nhwc_perm), quantization=orig_input.quantization
            )
            nchw2nhwc_transpose = tfl.TransposeOperator([orig_input, nchw2nhwc_perm_tensor], [new_input])
            ops[0].inputs[0] = new_input
            ops.insert(0, nchw2nhwc_transpose)
    for op in ops:
        graph_converter.add_operator(op)
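
# A minimal usage sketch (assumptions: the concrete handler follows the
# converter's `ATen*Operator` naming pattern, `ATenMeanSchema` is its schema
# base class, and `tfl.MeanOperator` is the wrapped TFLite op; the exact
# plumbing in aten.py may differ):
#
#     class ATenMeanOperator(ATenMeanSchema):
#         def parse(self, node, attrs, args, graph_converter):
#             super().parse(node, attrs, args, graph_converter)
#             self.run(node)
#             # transpose_opt=True: a mean over H,W may be lowered as NHWC pooling
#             self.handle_reduce(tfl.MeanOperator, args, graph_converter, True)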