tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_quantize_registry.py [210:256]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  def _is_supported_layer(self, layer_class):
    return layer_class in self._layer_quantize_map

  def _is_rnn_layer(self, layer):
    """Returns True iff `layer` is exactly one of Keras' built-in RNN layers.

    Note: this is an exact class match — subclasses of these layer types
    are not considered RNN layers by this check.
    """
    rnn_layer_classes = (
        layers.GRU,
        layers.LSTM,
        layers.RNN,
        layers.SimpleRNN,
    )
    return layer.__class__ in rnn_layer_classes

  def _get_quantize_info(self, layer_class):
    return self._layer_quantize_map[layer_class]

  # Interface functions.

  def supports(self, layer):
    """Returns whether the registry supports this layer type.

    # TODO(pulkitb): Consider pushing this function up to the registry.

    Args:
      layer: The layer to check for support.

    Returns:
      True/False whether the layer type is supported.

    """
    if self._is_supported_layer(layer.__class__):
      return True

    if not self._is_rnn_layer(layer):
      return False

    # An RNN layer is supported only when every one of its cells is.
    return all(
        self._is_supported_layer(cell.__class__)
        for cell in self._get_rnn_cells(layer))

  def _get_quantize_config(self, layer_type):
    quantize_info = self._get_quantize_info(layer_type)

    # In case of `Activation`, there is no `_QuantizeInfo` object. It
    # directly stores a `QuantizeConfig`.
    if isinstance(quantize_info, QuantizeConfig):
      return quantize_info
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



tensorflow_model_optimization/python/core/quantization/keras/experimental/default_n_bit/default_n_bit_quantize_registry.py [238:284]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  def _is_supported_layer(self, layer_class):
    return layer_class in self._layer_quantize_map

  def _is_rnn_layer(self, layer):
    """Returns True iff `layer` is exactly one of Keras' built-in RNN layers.

    Note: this is an exact class match — subclasses of these layer types
    are not considered RNN layers by this check.
    """
    rnn_layer_classes = (
        layers.GRU,
        layers.LSTM,
        layers.RNN,
        layers.SimpleRNN,
    )
    return layer.__class__ in rnn_layer_classes

  def _get_quantize_info(self, layer_class):
    return self._layer_quantize_map[layer_class]

  # Interface functions.

  def supports(self, layer):
    """Returns whether the registry supports this layer type.

    # TODO(pulkitb): Consider pushing this function up to the registry.

    Args:
      layer: The layer to check for support.

    Returns:
      True/False whether the layer type is supported.

    """
    if self._is_supported_layer(layer.__class__):
      return True

    if not self._is_rnn_layer(layer):
      return False

    # An RNN layer is supported only when every one of its cells is.
    return all(
        self._is_supported_layer(cell.__class__)
        for cell in self._get_rnn_cells(layer))

  def _get_quantize_config(self, layer_type):
    quantize_info = self._get_quantize_info(layer_type)

    # In case of `Activation`, there is no `_QuantizeInfo` object. It
    # directly stores a `QuantizeConfig`.
    if isinstance(quantize_info, QuantizeConfig):
      return quantize_info
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



