tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_quantize_registry.py [72:180]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    quantize_registry.QuantizeRegistry, _RNNHelper):
  """QuantizationRegistry for built-in Keras classes for default 8-bit scheme."""

  # TODO(tfmot): expand layers test in quantize_functional_test.py
  # to add more layers to allowlist.
  #
  # Registry of built-in Keras layers with known quantization behavior under
  # the default 8-bit scheme. Each _QuantizeInfo entry gives a layer class,
  # the names of its weight attributes to quantize, the names of its
  # activation attributes, and an optional trailing boolean (presumably
  # "quantize the layer output" — confirm against the _QuantizeInfo
  # definition). _no_quantize marks layers that pass through without
  # quantization. Commented-out entries are layers that are unverified or
  # handled elsewhere in the codebase.
  _LAYER_QUANTIZE_INFO = [
      # Activation Layers
      _QuantizeInfo(layers.ReLU, [], [], True),
      _QuantizeInfo(layers.Softmax, [], []),
      # Enable once verified.
      # layers.ELU,
      _QuantizeInfo(layers.LeakyReLU, [], [], True),
      # layers.PReLU,
      # layers.ThresholdedReLU,

      # Convolution Layers
      # _QuantizeInfo(layers.Conv1D, ['kernel'], ['activation']),

      # layers.Conv2D is supported and handled in code below.
      # layers.DepthwiseConv2D is supported and handled in code below.

      # _QuantizeInfo(layers.Conv3D, ['kernel'], ['activation']),
      # _QuantizeInfo(layers.Conv3DTranspose, ['kernel'], ['activation']),
      _QuantizeInfo(layers.Concatenate, [], [], True),
      _no_quantize(layers.Cropping1D),
      _no_quantize(layers.Cropping2D),
      _no_quantize(layers.Cropping3D),
      # _no_quantize(layers.UpSampling1D),

      # TODO(tfmot): Reduce the quantization error for UpSampling2D with
      # bilinear interpolation. UpSampling2D supports two interpolation
      # types, nearest and bilinear. The bilinear case is converted to the
      # TFLite ResizeBilinear integer op, which requires the input and output
      # to share the same quantization parameters (scale and zero_point). To
      # enforce that, the TFLite converter inserts a quantization cast op
      # right after the input to match the output's quantization params.
      # Current QAT does not model this behavior yet, so the quantization
      # error is larger than expected. Either QAT must add support for it, or
      # the TFLite kernel op must allow different quantization params for
      # input and output. (The nearest case just copies values, so it adds no
      # extra error even though the quantization order differs.)
      _QuantizeInfo(layers.UpSampling2D, [], [], True),

      # _no_quantize(layers.UpSampling3D),
      _no_quantize(layers.ZeroPadding1D),
      _no_quantize(layers.ZeroPadding2D),
      # _no_quantize(layers.ZeroPadding3D),

      # Supported via modifications in Transforms.
      # layers.SeparableConv1D, layers.SeparableConv2D,

      # Core Layers
      _no_quantize(layers.ActivityRegularization),
      _QuantizeInfo(layers.Dense, ['kernel'], ['activation']),
      _no_quantize(layers.Dropout),
      _no_quantize(layers.Flatten),
      # _no_quantize(layers.Masking),
      _no_quantize(layers.Permute),
      # _no_quantize(layers.RepeatVector),
      _no_quantize(layers.Reshape),
      _no_quantize(layers.SpatialDropout1D),
      _no_quantize(layers.SpatialDropout2D),
      _no_quantize(layers.SpatialDropout3D),
      # layers.Lambda needs custom handling by the user.

      # Pooling Layers
      _QuantizeInfo(layers.AveragePooling1D, [], [], True),
      _QuantizeInfo(layers.AveragePooling2D, [], [], True),
      # _QuantizeInfo(layers.AveragePooling3D, [], [], True),
      _QuantizeInfo(layers.GlobalAveragePooling1D, [], [], True),
      _QuantizeInfo(layers.GlobalAveragePooling2D, [], [], True),
      _QuantizeInfo(layers.GlobalAveragePooling3D, [], [], True),
      _no_quantize(layers.GlobalMaxPooling1D),
      _no_quantize(layers.GlobalMaxPooling2D),
      _no_quantize(layers.GlobalMaxPooling3D),
      # _no_quantize(layers.MaxPooling1D),
      _no_quantize(layers.MaxPooling2D),
      # _no_quantize(layers.MaxPooling3D),

      # _QuantizeInfo(layers.LocallyConnected1D, ['kernel'], ['activation']),
      # _QuantizeInfo(layers.LocallyConnected2D, ['kernel'], ['activation']),
      _QuantizeInfo(layers.Add, [], [], True),

      # Enable once verified with TFLite behavior.
      # layers.Embedding: ['embeddings'],

      # BatchNormalization is handled elsewhere, in the cases
      # where it's preceded by convolutional layers.
      #   layers.BatchNormalization: [],

      # Merge layers to be added.

      # RNN Cells
      # TODO(pulkitb): Verify RNN layers behavior.
      # TODO(tfmot): check if we still need to allowlist via compat.v1 and
      # compat.v2 to support legacy TensorFlow 2.X
      # behavior where the v2 RNN uses the v1 RNNCell instead of the v2 RNNCell.
      # See b/145939875 for details.
      # _QuantizeInfo(tf.keras.layers.GRUCell, ['kernel', 'recurrent_kernel'],
      #               ['activation', 'recurrent_activation']),
      # _QuantizeInfo(tf.keras.layers.LSTMCell, ['kernel', 'recurrent_kernel'],
      #               ['activation', 'recurrent_activation']),
      # _QuantizeInfo(tf.keras.experimental.PeepholeLSTMCell,
      #               ['kernel', 'recurrent_kernel'],
      #               ['activation', 'recurrent_activation']),
      # _QuantizeInfo(tf.keras.layers.SimpleRNNCell,
      #               ['kernel', 'recurrent_kernel'],
      #               ['activation', 'recurrent_activation']),
  ]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



tensorflow_model_optimization/python/core/quantization/keras/experimental/default_n_bit/default_n_bit_quantize_registry.py [81:189]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    quantize_registry.QuantizeRegistry, _RNNHelper):
  """QuantizationRegistry for built-in Keras classes for default 8-bit scheme."""

  # TODO(tfmot): expand layers test in quantize_functional_test.py
  # to add more layers to allowlist.
  #
  # Registry of built-in Keras layers with known quantization behavior for
  # this scheme. Each _QuantizeInfo entry gives a layer class, the names of
  # its weight attributes to quantize, the names of its activation
  # attributes, and an optional trailing boolean (presumably "quantize the
  # layer output" — confirm against the _QuantizeInfo definition).
  # _no_quantize marks layers that pass through without quantization.
  # Commented-out entries are layers that are unverified or handled elsewhere
  # in the codebase.
  _LAYER_QUANTIZE_INFO = [
      # Activation Layers
      _QuantizeInfo(layers.ReLU, [], [], True),
      _QuantizeInfo(layers.Softmax, [], []),
      # Enable once verified.
      # layers.ELU,
      _QuantizeInfo(layers.LeakyReLU, [], [], True),
      # layers.PReLU,
      # layers.ThresholdedReLU,

      # Convolution Layers
      # _QuantizeInfo(layers.Conv1D, ['kernel'], ['activation']),

      # layers.Conv2D is supported and handled in code below.
      # layers.DepthwiseConv2D is supported and handled in code below.

      # _QuantizeInfo(layers.Conv3D, ['kernel'], ['activation']),
      # _QuantizeInfo(layers.Conv3DTranspose, ['kernel'], ['activation']),
      _QuantizeInfo(layers.Concatenate, [], [], True),
      _no_quantize(layers.Cropping1D),
      _no_quantize(layers.Cropping2D),
      _no_quantize(layers.Cropping3D),
      # _no_quantize(layers.UpSampling1D),

      # TODO(tfmot): Reduce the quantization error for UpSampling2D with
      # bilinear interpolation. UpSampling2D supports two interpolation
      # types, nearest and bilinear. The bilinear case is converted to the
      # TFLite ResizeBilinear integer op, which requires the input and output
      # to share the same quantization parameters (scale and zero_point). To
      # enforce that, the TFLite converter inserts a quantization cast op
      # right after the input to match the output's quantization params.
      # Current QAT does not model this behavior yet, so the quantization
      # error is larger than expected. Either QAT must add support for it, or
      # the TFLite kernel op must allow different quantization params for
      # input and output. (The nearest case just copies values, so it adds no
      # extra error even though the quantization order differs.)
      _QuantizeInfo(layers.UpSampling2D, [], [], True),

      # _no_quantize(layers.UpSampling3D),
      _no_quantize(layers.ZeroPadding1D),
      _no_quantize(layers.ZeroPadding2D),
      # _no_quantize(layers.ZeroPadding3D),

      # Supported via modifications in Transforms.
      # layers.SeparableConv1D, layers.SeparableConv2D,

      # Core Layers
      _no_quantize(layers.ActivityRegularization),
      _QuantizeInfo(layers.Dense, ['kernel'], ['activation']),
      _no_quantize(layers.Dropout),
      _no_quantize(layers.Flatten),
      # _no_quantize(layers.Masking),
      _no_quantize(layers.Permute),
      # _no_quantize(layers.RepeatVector),
      _no_quantize(layers.Reshape),
      _no_quantize(layers.SpatialDropout1D),
      _no_quantize(layers.SpatialDropout2D),
      _no_quantize(layers.SpatialDropout3D),
      # layers.Lambda needs custom handling by the user.

      # Pooling Layers
      _QuantizeInfo(layers.AveragePooling1D, [], [], True),
      _QuantizeInfo(layers.AveragePooling2D, [], [], True),
      # _QuantizeInfo(layers.AveragePooling3D, [], [], True),
      _QuantizeInfo(layers.GlobalAveragePooling1D, [], [], True),
      _QuantizeInfo(layers.GlobalAveragePooling2D, [], [], True),
      _QuantizeInfo(layers.GlobalAveragePooling3D, [], [], True),
      _no_quantize(layers.GlobalMaxPooling1D),
      _no_quantize(layers.GlobalMaxPooling2D),
      _no_quantize(layers.GlobalMaxPooling3D),
      # _no_quantize(layers.MaxPooling1D),
      _no_quantize(layers.MaxPooling2D),
      # _no_quantize(layers.MaxPooling3D),

      # _QuantizeInfo(layers.LocallyConnected1D, ['kernel'], ['activation']),
      # _QuantizeInfo(layers.LocallyConnected2D, ['kernel'], ['activation']),
      _QuantizeInfo(layers.Add, [], [], True),

      # Enable once verified with TFLite behavior.
      # layers.Embedding: ['embeddings'],

      # BatchNormalization is handled elsewhere, in the cases
      # where it's preceded by convolutional layers.
      #   layers.BatchNormalization: [],

      # Merge layers to be added.

      # RNN Cells
      # TODO(pulkitb): Verify RNN layers behavior.
      # TODO(tfmot): check if we still need to allowlist via compat.v1 and
      # compat.v2 to support legacy TensorFlow 2.X
      # behavior where the v2 RNN uses the v1 RNNCell instead of the v2 RNNCell.
      # See b/145939875 for details.
      # _QuantizeInfo(tf.keras.layers.GRUCell, ['kernel', 'recurrent_kernel'],
      #               ['activation', 'recurrent_activation']),
      # _QuantizeInfo(tf.keras.layers.LSTMCell, ['kernel', 'recurrent_kernel'],
      #               ['activation', 'recurrent_activation']),
      # _QuantizeInfo(tf.keras.experimental.PeepholeLSTMCell,
      #               ['kernel', 'recurrent_kernel'],
      #               ['activation', 'recurrent_activation']),
      # _QuantizeInfo(tf.keras.layers.SimpleRNNCell,
      #               ['kernel', 'recurrent_kernel'],
      #               ['activation', 'recurrent_activation']),
  ]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



