tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_transforms.py [148:165]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  def pattern(self):
    # Match a (Sync)BatchNormalization whose input is the
    # 'sepconv1d_squeeze*' Lambda inserted when a SeparableConv1D is
    # rewritten as a SeparableConv2D, which in turn consumes a
    # Conv2D/DepthwiseConv2D with no fused activation ('linear').
    return LayerPattern(
        'BatchNormalization|SyncBatchNormalization',
        inputs=[LayerPattern(
            'Lambda', config={'name': 'sepconv1d_squeeze.*'},
            inputs=[LayerPattern(
                'Conv2D|DepthwiseConv2D',
                config={'activation': 'linear'})])])

  def replacement(self, match_layer):
    # Walk the matched chain: BatchNorm <- squeeze Lambda <- Conv.
    bn_layer_node = match_layer
    reshape_layer_node = bn_layer_node.input_layers[0]
    conv_layer_node = reshape_layer_node.input_layers[0]

    # Delegate to the shared logic inherited from Conv2DBatchNormQuantize.
    return self._replace(bn_layer_node, conv_layer_node)


class Conv2DBatchNormReLUQuantize(Conv2DBatchNormQuantize):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
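For context, a minimal sketch of a Keras graph this pattern would match; the layer shapes and the squeeze axis are illustrative assumptions. The Conv2D carries a linear activation, the Lambda name matches the 'sepconv1d_squeeze.*' regex, and the BatchNormalization consumes the squeezed output:

  import tensorflow as tf

  inputs = tf.keras.Input(shape=(16, 1, 8))
  x = tf.keras.layers.Conv2D(4, (3, 1), activation='linear')(inputs)
  # Squeeze Lambda whose name matches the 'sepconv1d_squeeze.*' regex above.
  x = tf.keras.layers.Lambda(
      lambda t: tf.squeeze(t, axis=2), name='sepconv1d_squeeze_1')(x)
  outputs = tf.keras.layers.BatchNormalization()(x)
  model = tf.keras.Model(inputs, outputs)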



tensorflow_model_optimization/python/core/quantization/keras/experimental/default_n_bit/default_n_bit_transforms.py [154:171]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  def pattern(self):
    # Match a (Sync)BatchNormalization whose input is the
    # 'sepconv1d_squeeze*' Lambda inserted when a SeparableConv1D is
    # rewritten as a SeparableConv2D, which in turn consumes a
    # Conv2D/DepthwiseConv2D with no fused activation ('linear').
    return LayerPattern(
        'BatchNormalization|SyncBatchNormalization',
        inputs=[LayerPattern(
            'Lambda', config={'name': 'sepconv1d_squeeze.*'},
            inputs=[LayerPattern(
                'Conv2D|DepthwiseConv2D',
                config={'activation': 'linear'})])])

  def replacement(self, match_layer):
    # Walk the matched chain: BatchNorm <- squeeze Lambda <- Conv.
    bn_layer_node = match_layer
    reshape_layer_node = bn_layer_node.input_layers[0]
    conv_layer_node = reshape_layer_node.input_layers[0]

    # Delegate to the shared logic inherited from Conv2DBatchNormQuantize.
    return self._replace(bn_layer_node, conv_layer_node)


class Conv2DBatchNormReLUQuantize(Conv2DBatchNormQuantize):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
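Both copies delegate to the _replace helper inherited from Conv2DBatchNormQuantize. For intuition, a hedged sketch of the per-channel batch-norm folding arithmetic these transforms enable; this is not TFMOT's actual _replace (which rewires the matched nodes and attaches quantize configs), and fold_batch_norm is a hypothetical helper name:

  import numpy as np

  def fold_batch_norm(kernel, bias, gamma, beta, mean, variance,
                      epsilon=1e-3):
      # Fold y = gamma * (conv(x) + bias - mean) / sqrt(var + eps) + beta
      # into the conv weights by scaling each output channel.
      scale = gamma / np.sqrt(variance + epsilon)
      folded_kernel = kernel * scale  # broadcasts over (kh, kw, cin, cout)
      folded_bias = beta + (bias - mean) * scale
      return folded_kernel, folded_bias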