def get_converter_with_quantization()

in tensorflow_examples/lite/model_maker/core/task/configs.py [0:0]


  def get_converter_with_quantization(self, converter, **kwargs):
    """Gets TFLite converter with settings for quantization."""
    converter.optimizations = self.optimizations

    # Wrap the representative data in a generator the converter can iterate
    # over to calibrate tensor value ranges for full-integer quantization.
    if self.representative_data is not None:
      ds = self.representative_data.gen_dataset(
          batch_size=1, is_training=False, **kwargs)
      converter.representative_dataset = tf.lite.RepresentativeDataset(
          _get_representative_dataset_gen(ds, self.quantization_steps))

    # Optional input/output tensor types (e.g. tf.uint8 for integer-only
    # models) and target-spec constraints on the ops and types the
    # converted model may use.
    if self.inference_input_type:
      converter.inference_input_type = self.inference_input_type
    if self.inference_output_type:
      converter.inference_output_type = self.inference_output_type
    if self.supported_ops:
      converter.target_spec.supported_ops = self.supported_ops
    if self.supported_types:
      converter.target_spec.supported_types = self.supported_types

    # Only override the converter's default when explicitly configured.
    if self.experimental_new_quantizer is not None:
      converter.experimental_new_quantizer = self.experimental_new_quantizer
    return converter
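
Note that _get_representative_dataset_gen is defined elsewhere in configs.py; the section above only shows its call site. Below is a minimal sketch of the overall pattern this method drives, assuming a helper of that shape and substituting a toy Keras model and synthetic calibration data for what Model Maker normally supplies through a QuantizationConfig (the model, shapes, and step count here are illustrative, not taken from the file above):

  import numpy as np
  import tensorflow as tf


  def _get_representative_dataset_gen(dataset, num_steps):
    """Sketch of the helper: yields up to `num_steps` calibration batches."""
    def representative_dataset_gen():
      for data, _ in dataset.take(num_steps):
        # The converter expects a list of input tensors per calibration step.
        yield [data]
    return representative_dataset_gen


  # Toy stand-ins for the model and representative data.
  model = tf.keras.Sequential(
      [tf.keras.Input(shape=(4,)), tf.keras.layers.Dense(2)])
  ds = tf.data.Dataset.from_tensor_slices(
      (np.random.rand(32, 4).astype(np.float32),
       np.zeros(32, dtype=np.int32))).batch(1)

  # Mirrors the assignments made by get_converter_with_quantization above.
  converter = tf.lite.TFLiteConverter.from_keras_model(model)
  converter.optimizations = [tf.lite.Optimize.DEFAULT]
  converter.representative_dataset = tf.lite.RepresentativeDataset(
      _get_representative_dataset_gen(ds, num_steps=100))
  converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
  converter.inference_input_type = tf.uint8
  converter.inference_output_type = tf.uint8
  tflite_model = converter.convert()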