in tensorflow_compression/python/layers/signal_conv.py [0:0]
def call(self, inputs) -> tf.Tensor:
  inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
  if inputs.shape.rank != self._rank + 2:
    raise ValueError(f"Input tensor must have rank {self._rank + 2}, "
                     f"received shape {inputs.shape}.")
  outputs = inputs
  # TF ops do not exist for all possible combinations of (`kernel_support`,
  # `corr`, `strides_up`, `strides_down`). We implement some additional
  # combinations by manipulating the kernels and toggling `corr`.
  kernel = self.kernel
  corr = self.corr
  # If a convolution with no upsampling is desired, we flip the kernels and
  # use cross correlation to implement it, provided the kernels are odd-length
  # in every dimension (with even-length kernels, the boundary handling
  # would have to change).
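  # (Recall that with a stride of 1, a convolution with kernel `k` equals a
  # cross correlation with `k` reversed along every spatial axis, so flipping
  # the kernel lets us reuse the correlation ops below.)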
  if (not corr and
      all(s == 1 for s in self.strides_up) and
      all(s % 2 == 1 for s in self.kernel_support)):
    corr = True
    slices = self._rank * (slice(None, None, -1),) + 2 * (slice(None),)
    kernel = kernel[slices]
  # Similarly, we can implement a cross correlation using convolutions.
  # However, we do this only if upsampling is requested, since calling the
  # transpose ops potentially wastes computation at the boundaries.
  elif (corr and
        any(s != 1 for s in self.strides_up) and
        all(s % 2 == 1 for s in self.kernel_support)):
    corr = False
    slices = self._rank * (slice(None, None, -1),) + 2 * (slice(None),)
    kernel = kernel[slices]
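  # In both branches above, `slices` reverses the kernel along each of the
  # `self._rank` spatial dimensions while leaving the two trailing channel
  # dimensions untouched.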
  # Compute the amount of necessary padding, and determine whether to use
  # built-in padding or to pre-pad with a separate op.
if self.padding == "valid":
padding = prepadding = self._rank * ((0, 0),)
else: # same_*
padding = padding_ops.same_padding_for_kernel(
self.kernel_support, corr, self.strides_up)
if (self.padding == "same_zeros" and
not self.channel_separable and
1 <= self._rank <= 2 and
self.use_explicit):
# Don't pre-pad and use built-in EXPLICIT mode.
prepadding = self._rank * ((0, 0),)
else:
# Pre-pad and then use built-in valid padding mode.
outputs = tf.pad(
outputs, self._padded_tuple(padding, (0, 0)), self._pad_mode)
prepadding = padding
padding = self._rank * ((0, 0),)
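  # After this block, `padding` holds the per-dimension (before, after)
  # amounts still to be handled inside the conv/corr op, while `prepadding`
  # holds the amounts that have already been applied to `outputs` via
  # `tf.pad`.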
  # Compute the convolution/correlation. Prefer EXPLICIT padding ops where
  # possible, but don't use them to implement VALID padding.
  if (corr and
      all(s == 1 for s in self.strides_up) and
      not self.channel_separable and
      1 <= self._rank <= 2 and
      not all(p[0] == p[1] == 0 for p in padding)):
    outputs = self._correlate_down_explicit(outputs, kernel, padding)
  elif (corr and
        all(s == 1 for s in self.strides_up) and
        all(p[0] == p[1] == 0 for p in padding)):
    outputs = self._correlate_down_valid(outputs, kernel)
  elif (not corr and
        not self.channel_separable and
        1 <= self._rank <= 2 and
        self.use_explicit):
    outputs = self._up_convolve_transpose_explicit(
        outputs, kernel, prepadding)
  elif not corr:
    outputs = self._up_convolve_transpose_valid(
        outputs, kernel, prepadding)
  else:
    self._raise_notimplemented()
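  # The two `corr` branches above compute the result via the strided
  # correlation ops; the two `not corr` branches go through the transpose
  # (deconvolution) ops; remaining combinations are unsupported.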
  # Now, add bias if requested.
  if self.use_bias:
    bias = self.bias
    if self.data_format == "channels_first":
      # As of Mar 2017, direct addition is significantly slower than
      # bias_add when computing gradients.
      if self._rank == 1:
        # tf.nn.bias_add does not accept a tensor with just one spatial
        # dimension, so temporarily insert a dummy one.
        outputs = tf.expand_dims(outputs, 2)
        outputs = tf.nn.bias_add(outputs, bias, data_format="NCHW")
        outputs = tf.squeeze(outputs, [2])
      elif self._rank == 2:
        outputs = tf.nn.bias_add(outputs, bias, data_format="NCHW")
      elif self._rank >= 3:
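        # Collapse all spatial dimensions except the first into one, so that
        # the (rank + 2)-dimensional tensor can be passed to bias_add as if
        # it were 4D NCHW; the original shape is restored afterwards.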
        shape = tf.shape(outputs)
        outputs = tf.reshape(
            outputs, tf.concat([shape[:3], [-1]], axis=0))
        outputs = tf.nn.bias_add(outputs, bias, data_format="NCHW")
        outputs = tf.reshape(outputs, shape)
    else:
      outputs = tf.nn.bias_add(outputs, bias)
  # Finally, pass through activation function if requested.
  if self.activation is not None:
    outputs = self.activation(outputs)
  return outputs
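# Illustrative usage sketch (not part of the library code). Assuming the
# SignalConv2D subclass exposes the usual constructor arguments (filters,
# kernel_support, corr, strides_down, padding, use_bias, activation), a
# typical downsampling layer could be built and applied like this:
#
#   layer = SignalConv2D(
#       64, (5, 5), corr=True, strides_down=2, padding="same_zeros",
#       use_bias=True, activation=tf.nn.relu)
#   y = layer(tf.zeros((1, 256, 256, 3)))  # channels_last input
#   # With "same" padding and strides_down=2, y should have shape
#   # (1, 128, 128, 64).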