in diffq/diffq.py [0:0]
def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False,
group_size: int = 1, min_bits: float = 2, max_bits: float = 15,
param="bits", noise="gaussian",
init_bits: float = 8, extra_bits: float = 0, suffix: str = "_diffq",
exclude: tp.List[str] = [], detect_bound: bool = True):
"""
Differentiable quantizer based on scaled noise injection.
For every parameter `p` in the model, this introduces a number-of-bits parameter
`b` with the same dimensions (when `group_size` = 1).
Before each forward, `p` is replaced by `p + U`, with `U` i.i.d. uniform noise
in the range `[-d/2, d/2]`, where `d` is the uniform quantization step for `b` bits.
This noise approximates the quantization noise in a differentiable manner, both
with respect to the unquantized parameter `p` and the number of bits `b`.
At evaluation (as detected with `model.eval()`), the model is replaced
by its truly quantized version, and restored when going back to training.
When doing actual quantization (for serialization or evaluation),
the number of bits is rounded to the nearest integer, and must be stored along
with the weights. This costs a few bits per dimension. To reduce this overhead,
one can use `group_size`, which shares a single noise level across multiple weight entries.
You can use the `DiffQuantizer.model_size` method to get a differentiable estimate of the
model size in MB, which you can then use as a penalty in your training loss
(see the usage sketch after this method).
Args:
model (torch.nn.Module): model to quantize
min_size (float): minimum size in MB of a parameter to be quantized.
float16 (bool): if True, parameters smaller than `min_size` are stored
in float16 rather than left unquantized in float32.
group_size (int): weight entries are grouped together to reduce the number
of noise scales to store. This should divide the size of all parameters
bigger than `min_size`.
min_bits (float): minimal number of bits.
max_bits (float): maximal number of bits.
param (str): parametrization used for the number of bits (default `"bits"`).
noise (str): distribution of the injected noise, either `"gaussian"` or `"uniform"`.
init_bits (float): initial number of bits.
extra_bits (float): extra bits to add for actual quantization (before rounding).
suffix (str): suffix used for the name of the extra noise scale parameters.
exclude (list[str]): list of patterns used to match parameters to exclude.
For instance `['bias']` to exclude all bias terms.
detect_bound (bool): if True, will detect bound parameters (the same tensor
    shared by several modules) and reuse the same quantized tensor and the
    same number of bits for all of them.
.. warning::
    You must call `model.train()` and `model.eval()` for `DiffQuantizer` to work properly.
"""
self.group_size = group_size
self.min_bits = min_bits
self.max_bits = max_bits
self.init_bits = init_bits
self.extra_bits = extra_bits
self.suffix = suffix
self.param = param
self.noise = noise
assert noise in ["gaussian", "uniform"]
self._optimizer_setup = False
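# With `b` bits over a unit range, the uniform quantization step is
# d = 1 / (2 ** b - 1); the bounds below are the steps at max_bits and min_bits.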
self._min_noise = 1 / (2 ** self.max_bits - 1)
self._max_noise = 1 / (2 ** self.min_bits - 1)
assert group_size >= 0
assert min_bits < init_bits < max_bits, \
    "init_bits must be strictly between min_bits and max_bits."
for name, _ in model.named_parameters():
if name.endswith(suffix):
raise RuntimeError("The model already has noise scale parameters; "
                   "was a DiffQuantizer already applied to this model?")
super().__init__(model, min_size, float16, exclude, detect_bound)
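
def _diffq_usage_sketch():
    # Illustrative sketch only, not part of diffq: a minimal example of wiring
    # a `DiffQuantizer` into a training step using the differentiable
    # `model_size` penalty described in the docstring above. `setup_optimizer`
    # is an assumption suggested by the `_optimizer_setup` flag; the tiny
    # model, random data and `penalty` weight are placeholders.
    import torch

    model = torch.nn.Linear(64, 64)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    quantizer = DiffQuantizer(model)
    quantizer.setup_optimizer(optimizer)  # assumed API, see `_optimizer_setup`

    penalty = 1e-3  # weight of the size term in the total loss
    model.train()  # noise injection only happens in training mode
    x = torch.randn(8, 64)
    loss = (model(x) ** 2).mean() + penalty * quantizer.model_size()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    model.eval()  # swaps in the truly quantized weights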