def __init__()

in quant/binary/weight_quantization.py [0:0]


    def __init__(self, size: int, k: int) -> None:
        """Construct a greedy-foldable quantizer with `k`-bits."""
        super(WeightQuantizerGF, self).__init__()
        self.k = k
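        # Register one zero-initialized buffer per bit level (v1..vk), each of length `size`.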
        for i in range(1, k + 1):
            self.register_buffer(f'v{i}', torch.tensor([0.0] * size))
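
For context, below is a minimal self-contained sketch of how this constructor might sit inside an nn.Module subclass, plus a usage line showing the buffers it registers. The surrounding class body, imports, and the instantiation values are assumptions for illustration; the repository's actual class presumably also defines the quantization logic itself.

    # Minimal sketch, assuming WeightQuantizerGF subclasses torch.nn.Module
    # (implied by the call to register_buffer). Everything outside the
    # original __init__ body is illustrative only.
    import torch
    import torch.nn as nn

    class WeightQuantizerGF(nn.Module):
        def __init__(self, size: int, k: int) -> None:
            """Construct a greedy-foldable quantizer with `k` bits."""
            super(WeightQuantizerGF, self).__init__()
            self.k = k
            for i in range(1, k + 1):
                # Buffers are saved in state_dict but are not trainable parameters.
                self.register_buffer(f'v{i}', torch.tensor([0.0] * size))

    # Usage: constructing with k=2 registers zero-initialized buffers v1 and v2.
    q = WeightQuantizerGF(size=4, k=2)
    print([name for name, _ in q.named_buffers()])  # ['v1', 'v2']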