tensorflow_model_optimization/python/core/internal/tensor_encoding/stages/research/quantization.py [223:256]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  @property
  def compressible_tensors_keys(self):
    """See base class."""
    return [self.ENCODED_VALUES_KEY]

  @property
  def commutes_with_sum(self):
    """See base class."""
    return False

  @property
  def decode_needs_input_shape(self):
    """See base class."""
    return True

  def get_params(self):
    """See base class."""
    params = collections.OrderedDict([(self.MAX_INT_VALUE_PARAMS_KEY,
                                       2**self._bits - 1)])
    return params, params

  def encode(self, x, encode_params):
    """See base class."""
    dim = tf.shape(x)[-1]
    x = tf.reshape(x, [-1, dim])

    # Per-channel min and max.
    min_x = tf.reduce_min(x, axis=0)
    max_x = tf.reduce_max(x, axis=0)

    max_value = tf.cast(encode_params[self.MAX_INT_VALUE_PARAMS_KEY], x.dtype)
    # Rescale the values to the range [0, max_value].
    # In the case of min_x == max_x, this returns all zeros.
    x = tf.compat.v1.div_no_nan(x - min_x, max_x - min_x) * max_value
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
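
The fragment above flattens the input to a 2-D matrix, takes per-channel extremes over the first axis, and rescales each channel to [0, 2**bits - 1]. A minimal standalone sketch of that rescaling, assuming TF 2.x and a hypothetical `bits` argument standing in for the stage's `self._bits`, might look like this:

import tensorflow as tf


def per_channel_rescale(x, bits=8):
  """Sketch of the per-channel rescaling performed in `encode` above.

  `bits` is a hypothetical stand-in for the stage's `self._bits`; the real
  stage reads the equivalent max-value parameter from `encode_params`.
  """
  dim = tf.shape(x)[-1]
  x = tf.reshape(x, [-1, dim])
  min_x = tf.reduce_min(x, axis=0)  # Per-channel minimum.
  max_x = tf.reduce_max(x, axis=0)  # Per-channel maximum.
  max_value = tf.cast(2**bits - 1, x.dtype)
  # Channels with min_x == max_x map to all zeros via divide_no_nan.
  return tf.math.divide_no_nan(x - min_x, max_x - min_x) * max_value


# Example: each column is rescaled independently to [0, 2**bits - 1].
x = tf.constant([[0., 10.], [1., 20.], [2., 30.], [3., 40.]])
print(per_channel_rescale(x, bits=2))  # Column-wise values in [0, 3].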



tensorflow_model_optimization/python/core/internal/tensor_encoding/stages/research/quantization.py [348:381]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  @property
  def compressible_tensors_keys(self):
    """See base class."""
    return [self.ENCODED_VALUES_KEY]

  @property
  def commutes_with_sum(self):
    """See base class."""
    return False

  @property
  def decode_needs_input_shape(self):
    """See base class."""
    return True

  def get_params(self):
    """See base class."""
    params = collections.OrderedDict([(self.MAX_INT_VALUE_PARAMS_KEY,
                                       2**self._bits - 1)])
    return params, params

  def encode(self, x, encode_params):
    """See base class."""
    dim = tf.shape(x)[-1]
    x = tf.reshape(x, [-1, dim])

    # Per-channel min and max.
    min_x = tf.reduce_min(x, axis=0)
    max_x = tf.reduce_max(x, axis=0)

    max_value = tf.cast(encode_params[self.MAX_INT_VALUE_PARAMS_KEY], x.dtype)
    # Rescale the values to the range [0, max_value].
    # In the case of min_x == max_x, this returns all zeros.
    x = tf.compat.v1.div_no_nan(x - min_x, max_x - min_x) * max_value
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
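
Both fragments share the same per-channel rescaling; neither includes the corresponding decode step. Assuming the stage stores the per-channel `min_x` and `max_x` alongside the quantized values, a sketch of the inverse mapping could look like this (the names below are illustrative, not the library's API):

import tensorflow as tf


def per_channel_restore(q, min_x, max_x, bits=8):
  """Hypothetical sketch of the inverse mapping, not the library's decode.

  Maps quantized values in [0, 2**bits - 1] back to the original
  per-channel range [min_x, max_x].
  """
  max_value = tf.cast(2**bits - 1, q.dtype)
  return q / max_value * (max_x - min_x) + min_x


# Round-trip example for one channel spanning [-1.0, 1.0].
q = tf.constant([0., 1., 2., 3.])
print(per_channel_restore(q, min_x=-1.0, max_x=1.0, bits=2))  # [-1, -1/3, 1/3, 1]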



