in tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_transforms.py [0:0]
def replacement(self, match_layer):
    """Replaces a SeparableConv1D layer with an expand_dims ->
    SeparableConv2D -> squeeze chain so it can be quantized via the 2D path.

    Returns the matched node unchanged if the user already attached a custom
    quantize config; otherwise returns the squeeze LayerNode wrapping the
    new SeparableConv2D and expand layers.
    """
    # Respect any user-supplied quantize config: leave such layers alone.
    if _has_custom_quantize_config(match_layer):
        return match_layer

    conv1d_cfg = match_layer.layer['config']
    conv1d_weights = list(match_layer.weights.values())

    # SepConv2D does not accept causal padding, and SepConv1D has some special
    # handling for it.
    # TODO(pulkitb): Add support for causal padding.
    if conv1d_cfg['padding'] == 'causal':
        raise ValueError('SeparableConv1D with causal padding is not supported.')

    # Build an equivalent SeparableConv2D: the inserted spatial dimension gets
    # kernel size 1 and dilation 1, so it behaves as a no-op axis.
    # TODO(pulkitb): Handle other base_layer args such as dtype, input_dim etc.
    conv2d_layer = tf.keras.layers.SeparableConv2D(
        filters=conv1d_cfg['filters'],
        kernel_size=(1,) + _normalize_tuple(conv1d_cfg['kernel_size']),
        strides=_normalize_tuple(conv1d_cfg['strides']) * 2,
        padding=conv1d_cfg['padding'],
        data_format=conv1d_cfg['data_format'],
        dilation_rate=(1,) + _normalize_tuple(conv1d_cfg['dilation_rate']),
        depth_multiplier=conv1d_cfg['depth_multiplier'],
        activation=conv1d_cfg['activation'],
        use_bias=conv1d_cfg['use_bias'],
        depthwise_initializer=conv1d_cfg['depthwise_initializer'],
        pointwise_initializer=conv1d_cfg['pointwise_initializer'],
        bias_initializer=conv1d_cfg['bias_initializer'],
        depthwise_regularizer=conv1d_cfg['depthwise_regularizer'],
        pointwise_regularizer=conv1d_cfg['pointwise_regularizer'],
        bias_regularizer=conv1d_cfg['bias_regularizer'],
        activity_regularizer=conv1d_cfg['activity_regularizer'],
        depthwise_constraint=conv1d_cfg['depthwise_constraint'],
        pointwise_constraint=conv1d_cfg['pointwise_constraint'],
        bias_constraint=conv1d_cfg['bias_constraint'],
        # TODO(pulkitb): Rethink what to do for name. Using the same name leads
        # to confusion, since it's typically separable_conv1d
        name=conv1d_cfg['name'] + '_QAT_SepConv2D',
        trainable=conv1d_cfg['trainable'])

    # Prepend the new unit spatial axis to the 1D kernels; bias is unchanged.
    conv2d_weights = collections.OrderedDict([
        ('depthwise_kernel:0', np.expand_dims(conv1d_weights[0], 0)),
        ('pointwise_kernel:0', np.expand_dims(conv1d_weights[1], 0)),
    ])
    if conv1d_cfg['use_bias']:
        conv2d_weights['bias:0'] = conv1d_weights[2]

    # Axis at which the dummy spatial dimension is inserted/removed:
    # NHWC inserts before the 1D spatial axis, NCHW after the channel axis.
    if conv1d_cfg['data_format'] == 'channels_last':
        spatial_dim = 1
    else:
        spatial_dim = 2

    def _serialized_config(layer):
        # Serialize a layer and stamp its name into the config dict, as the
        # transform machinery expects.
        cfg = keras.layers.serialize(layer)
        cfg['name'] = layer.name
        return cfg

    # TODO(pulkitb): Consider moving from Lambda to custom ExpandDims/Squeeze.
    # Lambda layers that adapt tensors between the 1D and 2D shapes.
    expand_layer = tf.keras.layers.Lambda(
        lambda x: tf.expand_dims(x, spatial_dim),
        name=self._get_name('sepconv1d_expand'))
    squeeze_layer = tf.keras.layers.Lambda(
        lambda x: tf.squeeze(x, [spatial_dim]),
        name=self._get_name('sepconv1d_squeeze'))

    # The expand/squeeze wrappers should not themselves be quantized.
    noop_metadata = lambda: {
        'quantize_config': default_8bit_quantize_configs.NoOpQuantizeConfig()}

    expand_node = LayerNode(
        _serialized_config(expand_layer), metadata=noop_metadata())
    conv2d_node = LayerNode(
        _serialized_config(conv2d_layer),
        weights=conv2d_weights,
        # quantize_config None ensures this new layer is considered for
        # quantization.
        metadata={'quantize_config': None},
        input_layers=[expand_node])
    return LayerNode(
        _serialized_config(squeeze_layer),
        metadata=noop_metadata(),
        input_layers=[conv2d_node])