def __post_init__()

in optimum/amd/brevitas/configuration.py [0:0]


    def __post_init__(self):
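        # Fill in default group sizes when per-group granularity is requested
        # without an explicit size: 64 for activations, 128 for weights.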
        if self.activations_quant_granularity == "per_group" and self.activations_group_size is None:
            self.activations_group_size = 64

        if self.weights_quant_granularity == "per_group" and self.weights_group_size is None:
            self.weights_group_size = 128

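        # GPTQ defaults to act_order disabled (columns quantized in order).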
        if self.apply_gptq and self.gptq_act_order is None:
            self.gptq_act_order = False

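        # Reject unsupported combinations of quantization options.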
        if self.is_static and self.activations_quant_granularity != "per_tensor":
            raise ValueError(
                f'Static quantization with activations_quant_granularity="{self.activations_quant_granularity}" is not supported. The quantization granularity must be activations_quant_granularity="per_tensor" when using static quantization.'
            )

        if self.weights_quant_granularity == "per_group" and self.weights_param_method == "mse":
            raise ValueError(
                'The quantization configuration `weights_quant_granularity="per_group"` is not supported alongside `weights_param_method="mse"`. Per-group MSE weight quantization is not supported.'
            )

        if self.scale_precision == "power_of_two_scale" and (
            not self.weights_symmetric or not self.activations_symmetric
        ):
            raise ValueError(
                'The quantization configuration `scale_precision="power_of_two_scale"` is not supported alongside `weights_symmetric=False` or `activations_symmetric=False`. Asymmetric quantization with power-of-two scale factors is not supported.'
            )

        if self.scale_precision == "power_of_two_scale" and self.weights_quant_granularity == "per_group":
            raise ValueError(
                'The quantization configuration `scale_precision="power_of_two_scale"` is not supported alongside `weights_quant_granularity="per_group"`. Per-group quantization with power-of-two scale factors is not supported.'
            )

        if not self.is_static and self.activations_quant_granularity == "per_group" and not self.activations_symmetric:
            raise ValueError(
                'The quantization configuration `activations_quant_granularity="per_group"` is not supported alongside `activations_symmetric=False`. Asymmetric dynamic per-group quantization is not supported.'
            )

        if self.scale_precision == "power_of_two_scale" and not self.is_static:
            raise ValueError(
                'The quantization configuration `scale_precision="power_of_two_scale"` is not supported alongside `is_static=False`. Dynamic activation quantization with power-of-two scale factors is not supported.'
            )

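        # Weights-only quantization: activation-related settings do not apply,
        # so clear them.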
        if self.weights_only:
            self.activations_bitwidth = None
            self.activations_symmetric = None
            self.activations_equalization = None
            self.activations_group_size = None
            self.activations_param_method = None
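
Usage sketch (not part of the source): how these defaults and checks surface to a caller, assuming this method belongs to the BrevitasQuantizationConfig dataclass exported by optimum-amd and that all other fields keep their defaults.

    from optimum.amd import BrevitasQuantizationConfig  # assumed export

    # Per-group weight quantization without an explicit size: __post_init__
    # fills in the default group size of 128.
    config = BrevitasQuantizationConfig(weights_quant_granularity="per_group")
    assert config.weights_group_size == 128

    # Static quantization requires per-tensor activation granularity, so this
    # combination raises the ValueError defined above.
    try:
        BrevitasQuantizationConfig(is_static=True, activations_quant_granularity="per_group")
    except ValueError as err:
        print(err)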